code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
'''simple docstring''' import argparse import logging import pickle import random import time import numpy as np from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO ) a : Tuple = logging.getLogger(__name__) def lowercase ( ): '''simple docstring''' UpperCAmelCase : Any = argparse.ArgumentParser( description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." ) parser.add_argument("--file_path" , type=__magic_name__ , default="data/dump.txt" , help="The path to the data." ) parser.add_argument("--tokenizer_type" , type=__magic_name__ , default="bert" , choices=["bert", "roberta", "gpt2"] ) parser.add_argument("--tokenizer_name" , type=__magic_name__ , default="bert-base-uncased" , help="The tokenizer to use." ) parser.add_argument("--dump_file" , type=__magic_name__ , default="data/dump" , help="The dump file prefix." ) UpperCAmelCase : List[Any] = parser.parse_args() logger.info(F"Loading Tokenizer ({args.tokenizer_name})" ) if args.tokenizer_type == "bert": UpperCAmelCase : Any = BertTokenizer.from_pretrained(args.tokenizer_name ) UpperCAmelCase : Optional[int] = tokenizer.special_tokens_map["cls_token"] # `[CLS]` UpperCAmelCase : Any = tokenizer.special_tokens_map["sep_token"] # `[SEP]` elif args.tokenizer_type == "roberta": UpperCAmelCase : List[Any] = RobertaTokenizer.from_pretrained(args.tokenizer_name ) UpperCAmelCase : Tuple = tokenizer.special_tokens_map["cls_token"] # `<s>` UpperCAmelCase : Optional[int] = tokenizer.special_tokens_map["sep_token"] # `</s>` elif args.tokenizer_type == "gpt2": UpperCAmelCase : List[str] = GPTaTokenizer.from_pretrained(args.tokenizer_name ) UpperCAmelCase : Optional[Any] = tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>` UpperCAmelCase : List[Any] = tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>` logger.info(F"Loading text from {args.file_path}" ) with open(args.file_path , "r" , encoding="utf8" ) as fp: UpperCAmelCase : str = fp.readlines() logger.info("Start encoding" ) logger.info(F"{len(__magic_name__ )} examples to process." ) UpperCAmelCase : int = [] UpperCAmelCase : int = 0 UpperCAmelCase : Union[str, Any] = 1_0000 UpperCAmelCase : Union[str, Any] = time.time() for text in data: UpperCAmelCase : Dict = F"{bos} {text.strip()} {sep}" UpperCAmelCase : Tuple = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) rslt.append(__magic_name__ ) iter += 1 if iter % interval == 0: UpperCAmelCase : Dict = time.time() logger.info(F"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" ) UpperCAmelCase : Any = time.time() logger.info("Finished binarization" ) logger.info(F"{len(__magic_name__ )} examples processed." ) UpperCAmelCase : str = F"{args.dump_file}.{args.tokenizer_name}.pickle" UpperCAmelCase : List[str] = tokenizer.vocab_size if vocab_size < (1 << 16): UpperCAmelCase : int = [np.uintaa(__magic_name__ ) for d in rslt] else: UpperCAmelCase : int = [np.intaa(__magic_name__ ) for d in rslt] random.shuffle(rslt_ ) logger.info(F"Dump to {dp_file}" ) with open(__magic_name__ , "wb" ) as handle: pickle.dump(rslt_ , __magic_name__ , protocol=pickle.HIGHEST_PROTOCOL ) if __name__ == "__main__": main()
311
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() a : Dict = logging.get_logger(__name__) def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[str] = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: UpperCAmelCase : Tuple = 192 UpperCAmelCase : str = 768 UpperCAmelCase : List[Any] = 12 UpperCAmelCase : List[Any] = 3 UpperCAmelCase : List[Any] = [800, 1333] UpperCAmelCase : List[str] = False elif yolos_name == "yolos_s_dWr": UpperCAmelCase : Union[str, Any] = 330 UpperCAmelCase : Union[str, Any] = 14 UpperCAmelCase : Any = 6 UpperCAmelCase : int = 1320 elif "yolos_s" in yolos_name: UpperCAmelCase : Union[str, Any] = 384 UpperCAmelCase : Dict = 1536 UpperCAmelCase : str = 12 UpperCAmelCase : List[str] = 6 elif "yolos_b" in yolos_name: UpperCAmelCase : int = [800, 1344] UpperCAmelCase : Optional[int] = 91 UpperCAmelCase : int = "huggingface/label-files" UpperCAmelCase : Union[str, Any] = "coco-detection-id2label.json" UpperCAmelCase : Optional[Any] = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="dataset" ) , "r" ) ) UpperCAmelCase : str = {int(__magic_name__ ): v for k, v in idalabel.items()} UpperCAmelCase : str = idalabel UpperCAmelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} return config def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ = False ): '''simple docstring''' for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCAmelCase : Tuple = state_dict.pop(F"blocks.{i}.attn.qkv.weight" ) UpperCAmelCase : List[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase : str = in_proj_weight[: config.hidden_size, :] UpperCAmelCase : Optional[int] = in_proj_bias[: config.hidden_size] UpperCAmelCase : Optional[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCAmelCase : int = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCAmelCase : str = in_proj_weight[-config.hidden_size :, :] UpperCAmelCase : Tuple = in_proj_bias[-config.hidden_size :] def lowercase ( __magic_name__ ): '''simple docstring''' if "backbone" in name: UpperCAmelCase : int = name.replace("backbone" , "vit" ) if "cls_token" in name: UpperCAmelCase : Dict = name.replace("cls_token" , "embeddings.cls_token" ) if "det_token" in name: UpperCAmelCase : int = name.replace("det_token" , "embeddings.detection_tokens" ) if "mid_pos_embed" in name: UpperCAmelCase : Tuple = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" ) if "pos_embed" in name: UpperCAmelCase : int = name.replace("pos_embed" , "embeddings.position_embeddings" ) if "patch_embed.proj" in name: UpperCAmelCase : str = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "blocks" in name: UpperCAmelCase : Tuple = name.replace("blocks" , "encoder.layer" ) if "attn.proj" in name: UpperCAmelCase : Tuple = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: UpperCAmelCase : Any = name.replace("attn" , "attention.self" ) if "norm1" in name: UpperCAmelCase : int = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: UpperCAmelCase : List[str] = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: UpperCAmelCase : List[str] = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: UpperCAmelCase : Dict = name.replace("mlp.fc2" , "output.dense" ) if "class_embed" in name: UpperCAmelCase : Any = name.replace("class_embed" , "class_labels_classifier" ) if "bbox_embed" in name: UpperCAmelCase : Optional[int] = name.replace("bbox_embed" , "bbox_predictor" ) if "vit.norm" in name: UpperCAmelCase : Tuple = name.replace("vit.norm" , "vit.layernorm" ) return name def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' for key in orig_state_dict.copy().keys(): UpperCAmelCase : Optional[int] = orig_state_dict.pop(__magic_name__ ) if "qkv" in key: UpperCAmelCase : str = key.split("." ) UpperCAmelCase : List[Any] = int(key_split[2] ) UpperCAmelCase : int = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: UpperCAmelCase : Optional[int] = val[:dim, :] UpperCAmelCase : Union[str, Any] = val[ dim : dim * 2, : ] UpperCAmelCase : Any = val[-dim:, :] else: UpperCAmelCase : Tuple = val[:dim] UpperCAmelCase : List[str] = val[dim : dim * 2] UpperCAmelCase : Any = val[-dim:] else: UpperCAmelCase : Union[str, Any] = val return orig_state_dict def lowercase ( ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase : Tuple = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw ) return im @torch.no_grad() def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = False ): '''simple docstring''' UpperCAmelCase : Tuple = get_yolos_config(__magic_name__ ) # load original state_dict UpperCAmelCase : int = torch.load(__magic_name__ , map_location="cpu" )["model"] # load 🤗 model UpperCAmelCase : int = YolosForObjectDetection(__magic_name__ ) model.eval() UpperCAmelCase : Dict = convert_state_dict(__magic_name__ , __magic_name__ ) model.load_state_dict(__magic_name__ ) # Check outputs on an image, prepared by YolosImageProcessor UpperCAmelCase : Dict = 800 if yolos_name != "yolos_ti" else 512 UpperCAmelCase : int = YolosImageProcessor(format="coco_detection" , size=__magic_name__ ) UpperCAmelCase : List[Any] = image_processor(images=prepare_img() , return_tensors="pt" ) UpperCAmelCase : List[str] = model(**__magic_name__ ) UpperCAmelCase , UpperCAmelCase : Optional[int] = outputs.logits, outputs.pred_boxes UpperCAmelCase , UpperCAmelCase : Optional[Any] = None, None if yolos_name == "yolos_ti": UpperCAmelCase : str = torch.tensor( [[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] ) UpperCAmelCase : Tuple = torch.tensor( [[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] ) elif yolos_name == "yolos_s_200_pre": UpperCAmelCase : Union[str, Any] = torch.tensor( [[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] ) UpperCAmelCase : List[str] = torch.tensor( [[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] ) elif yolos_name == "yolos_s_300_pre": UpperCAmelCase : List[str] = torch.tensor( [[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] ) UpperCAmelCase : Dict = torch.tensor( [[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] ) elif yolos_name == "yolos_s_dWr": UpperCAmelCase : Dict = torch.tensor( [[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] ) UpperCAmelCase : List[Any] = torch.tensor( [[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] ) elif yolos_name == "yolos_base": UpperCAmelCase : str = torch.tensor( [[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] ) UpperCAmelCase : Union[str, Any] = torch.tensor( [[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] ) else: raise ValueError(F"Unknown yolos_name: {yolos_name}" ) assert torch.allclose(logits[0, :3, :3] , __magic_name__ , atol=1e-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , __magic_name__ , atol=1e-4 ) Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) print(F"Saving model {yolos_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(__magic_name__ ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(__magic_name__ ) if push_to_hub: UpperCAmelCase : int = { "yolos_ti": "yolos-tiny", "yolos_s_200_pre": "yolos-small", "yolos_s_300_pre": "yolos-small-300", "yolos_s_dWr": "yolos-small-dwr", "yolos_base": "yolos-base", } print("Pushing to the hub..." ) UpperCAmelCase : Tuple = model_mapping[yolos_name] image_processor.push_to_hub(__magic_name__ , organization="hustvl" ) model.push_to_hub(__magic_name__ , organization="hustvl" ) if __name__ == "__main__": a : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--yolos_name", default="yolos_s_200_pre", type=str, help=( "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre'," " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'." ), ) parser.add_argument( "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) a : str = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
311
1
'''simple docstring''' from ....configuration_utils import PretrainedConfig from ....utils import logging a : Union[str, Any] = logging.get_logger(__name__) # TODO: upload to AWS a : Optional[int] = { "yjernite/retribert-base-uncased": ( "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json" ), } class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : str = "retribert" def __init__( self , snake_case=3_0_5_2_2 , snake_case=7_6_8 , snake_case=8 , snake_case=1_2 , snake_case=3_0_7_2 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=5_1_2 , snake_case=2 , snake_case=0.02 , snake_case=1e-12 , snake_case=True , snake_case=1_2_8 , snake_case=0 , **snake_case , ): '''simple docstring''' super().__init__(pad_token_id=snake_case , **snake_case ) UpperCAmelCase : List[Any] = vocab_size UpperCAmelCase : str = hidden_size UpperCAmelCase : Union[str, Any] = num_hidden_layers UpperCAmelCase : List[Any] = num_attention_heads UpperCAmelCase : Optional[Any] = hidden_act UpperCAmelCase : Tuple = intermediate_size UpperCAmelCase : Tuple = hidden_dropout_prob UpperCAmelCase : List[str] = attention_probs_dropout_prob UpperCAmelCase : int = max_position_embeddings UpperCAmelCase : Union[str, Any] = type_vocab_size UpperCAmelCase : List[Any] = initializer_range UpperCAmelCase : int = layer_norm_eps UpperCAmelCase : Tuple = share_encoders UpperCAmelCase : Optional[int] = projection_dim
311
'''simple docstring''' import argparse import logging import pickle import random import time import numpy as np from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO ) a : Tuple = logging.getLogger(__name__) def lowercase ( ): '''simple docstring''' UpperCAmelCase : Any = argparse.ArgumentParser( description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." ) parser.add_argument("--file_path" , type=__magic_name__ , default="data/dump.txt" , help="The path to the data." ) parser.add_argument("--tokenizer_type" , type=__magic_name__ , default="bert" , choices=["bert", "roberta", "gpt2"] ) parser.add_argument("--tokenizer_name" , type=__magic_name__ , default="bert-base-uncased" , help="The tokenizer to use." ) parser.add_argument("--dump_file" , type=__magic_name__ , default="data/dump" , help="The dump file prefix." ) UpperCAmelCase : List[Any] = parser.parse_args() logger.info(F"Loading Tokenizer ({args.tokenizer_name})" ) if args.tokenizer_type == "bert": UpperCAmelCase : Any = BertTokenizer.from_pretrained(args.tokenizer_name ) UpperCAmelCase : Optional[int] = tokenizer.special_tokens_map["cls_token"] # `[CLS]` UpperCAmelCase : Any = tokenizer.special_tokens_map["sep_token"] # `[SEP]` elif args.tokenizer_type == "roberta": UpperCAmelCase : List[Any] = RobertaTokenizer.from_pretrained(args.tokenizer_name ) UpperCAmelCase : Tuple = tokenizer.special_tokens_map["cls_token"] # `<s>` UpperCAmelCase : Optional[int] = tokenizer.special_tokens_map["sep_token"] # `</s>` elif args.tokenizer_type == "gpt2": UpperCAmelCase : List[str] = GPTaTokenizer.from_pretrained(args.tokenizer_name ) UpperCAmelCase : Optional[Any] = tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>` UpperCAmelCase : List[Any] = tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>` logger.info(F"Loading text from {args.file_path}" ) with open(args.file_path , "r" , encoding="utf8" ) as fp: UpperCAmelCase : str = fp.readlines() logger.info("Start encoding" ) logger.info(F"{len(__magic_name__ )} examples to process." ) UpperCAmelCase : int = [] UpperCAmelCase : int = 0 UpperCAmelCase : Union[str, Any] = 1_0000 UpperCAmelCase : Union[str, Any] = time.time() for text in data: UpperCAmelCase : Dict = F"{bos} {text.strip()} {sep}" UpperCAmelCase : Tuple = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) rslt.append(__magic_name__ ) iter += 1 if iter % interval == 0: UpperCAmelCase : Dict = time.time() logger.info(F"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" ) UpperCAmelCase : Any = time.time() logger.info("Finished binarization" ) logger.info(F"{len(__magic_name__ )} examples processed." ) UpperCAmelCase : str = F"{args.dump_file}.{args.tokenizer_name}.pickle" UpperCAmelCase : List[str] = tokenizer.vocab_size if vocab_size < (1 << 16): UpperCAmelCase : int = [np.uintaa(__magic_name__ ) for d in rslt] else: UpperCAmelCase : int = [np.intaa(__magic_name__ ) for d in rslt] random.shuffle(rslt_ ) logger.info(F"Dump to {dp_file}" ) with open(__magic_name__ , "wb" ) as handle: pickle.dump(rslt_ , __magic_name__ , protocol=pickle.HIGHEST_PROTOCOL ) if __name__ == "__main__": main()
311
1
'''simple docstring''' from typing import Any def lowercase ( __magic_name__ ): '''simple docstring''' if not input_list: return [] UpperCAmelCase : Tuple = [input_list.count(__magic_name__ ) for value in input_list] UpperCAmelCase : Union[str, Any] = max(__magic_name__ ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(__magic_name__ ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
311
'''simple docstring''' import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available from transformers.models.gpta.tokenization_gpta import GPTaTokenizer from transformers.testing_utils import require_keras_nlp, require_tf, slow if is_tf_available(): import tensorflow as tf if is_keras_nlp_available(): from transformers.models.gpta import TFGPTaTokenizer a : Tuple = ["gpt2"] a : Dict = "gpt2" if is_tf_available(): class UpperCamelCase__ ( tf.Module ): """simple docstring""" def __init__( self , snake_case ): '''simple docstring''' super().__init__() UpperCAmelCase : Tuple = tokenizer UpperCAmelCase : List[str] = AutoConfig.from_pretrained(snake_case ) UpperCAmelCase : int = TFGPTaLMHeadModel.from_config(snake_case ) @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="text" ),) ) def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = self.tokenizer(snake_case ) UpperCAmelCase : Optional[int] = tokenized["input_ids"].to_tensor() UpperCAmelCase : Optional[int] = tf.cast(input_ids_dense > 0 , tf.intaa ) # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN]) UpperCAmelCase : List[Any] = self.model(input_ids=snake_case , attention_mask=snake_case )["logits"] return outputs @require_tf @require_keras_nlp class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self ): '''simple docstring''' super().setUp() UpperCAmelCase : Any = [GPTaTokenizer.from_pretrained(snake_case ) for checkpoint in (TOKENIZER_CHECKPOINTS)] UpperCAmelCase : Optional[Any] = [TFGPTaTokenizer.from_pretrained(snake_case ) for checkpoint in TOKENIZER_CHECKPOINTS] assert len(self.tokenizers ) == len(self.tf_tokenizers ) UpperCAmelCase : Tuple = [ "This is a straightforward English test sentence.", "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.", "Now we're going to add some Chinese: 一 二 三 一二三", "And some much more rare Chinese: 齉 堃 齉堃", "Je vais aussi écrire en français pour tester les accents", "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ", ] UpperCAmelCase : Optional[Any] = list(zip(self.test_sentences , self.test_sentences[::-1] ) ) def A_ ( self ): '''simple docstring''' for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ): for test_inputs in self.test_sentences: UpperCAmelCase : List[Any] = tokenizer([test_inputs] , return_tensors="tf" ) UpperCAmelCase : Any = tf_tokenizer([test_inputs] ) for key in python_outputs.keys(): # convert them to numpy to avoid messing with ragged tensors UpperCAmelCase : Dict = python_outputs[key].numpy() UpperCAmelCase : List[str] = tf_outputs[key].numpy() self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) ) self.assertTrue(tf.reduce_all(tf.cast(snake_case , tf.intaa ) == tf_outputs_values ) ) @slow def A_ ( self ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: UpperCAmelCase : Optional[Any] = tf.function(snake_case ) for test_inputs in self.test_sentences: UpperCAmelCase : List[str] = tf.constant(snake_case ) UpperCAmelCase : Dict = compiled_tokenizer(snake_case ) UpperCAmelCase : Union[str, Any] = tf_tokenizer(snake_case ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def A_ ( self ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: UpperCAmelCase : int = ModelToSave(tokenizer=snake_case ) UpperCAmelCase : Tuple = tf.convert_to_tensor([self.test_sentences[0]] ) UpperCAmelCase : str = model.serving(snake_case ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: UpperCAmelCase : Optional[int] = Path(snake_case ) / "saved.model" tf.saved_model.save(snake_case , snake_case , signatures={"serving_default": model.serving} ) UpperCAmelCase : int = tf.saved_model.load(snake_case ) UpperCAmelCase : str = loaded_model.signatures["serving_default"](snake_case )["output_0"] # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertTrue(tf.reduce_all(out == loaded_output ) ) @slow def A_ ( self ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: UpperCAmelCase : Any = tf.convert_to_tensor([self.test_sentences[0]] ) UpperCAmelCase : Tuple = tf_tokenizer(snake_case ) # Build model with some sample inputs UpperCAmelCase : Union[str, Any] = tf_tokenizer.get_config() UpperCAmelCase : str = TFGPTaTokenizer.from_config(snake_case ) UpperCAmelCase : Tuple = model_from_config(snake_case ) for key in from_config_output.keys(): self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) ) @slow def A_ ( self ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: # for the test to run UpperCAmelCase : List[str] = 1_2_3_1_2_3 for max_length in [3, 5, 1_0_2_4]: UpperCAmelCase : Any = tf.convert_to_tensor([self.test_sentences[0]] ) UpperCAmelCase : Tuple = tf_tokenizer(snake_case , max_length=snake_case ) UpperCAmelCase : Union[str, Any] = out["input_ids"].numpy().shape[1] assert out_length == max_length
311
1
'''simple docstring''' import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def lowercase ( __magic_name__ , __magic_name__=10 ): '''simple docstring''' UpperCAmelCase : Tuple = [] for _ in range(__magic_name__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def lowercase ( __magic_name__ , __magic_name__=10 ): '''simple docstring''' UpperCAmelCase : List[str] = [] for step in range(__magic_name__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase : Any = os.path.join(__magic_name__ , "schedule.bin" ) torch.save(scheduler.state_dict() , __magic_name__ ) UpperCAmelCase : Any = torch.load(__magic_name__ ) scheduler.load_state_dict(__magic_name__ ) return lrs @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self , snake_case , snake_case , snake_case ): '''simple docstring''' self.assertEqual(len(snake_case ) , len(snake_case ) ) for a, b in zip(snake_case , snake_case ): self.assertAlmostEqual(snake_case , snake_case , delta=snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case ) UpperCAmelCase : Any = torch.tensor([0.4, 0.2, -0.5] ) UpperCAmelCase : Any = nn.MSELoss() # No warmup, constant schedule, no gradient clipping UpperCAmelCase : List[str] = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 ) for _ in range(1_0_0 ): UpperCAmelCase : List[Any] = criterion(snake_case , snake_case ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case ) UpperCAmelCase : int = torch.tensor([0.4, 0.2, -0.5] ) UpperCAmelCase : str = nn.MSELoss() # No warmup, constant schedule, no gradient clipping UpperCAmelCase : str = Adafactor( params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=snake_case , weight_decay=0.0 , relative_step=snake_case , scale_parameter=snake_case , warmup_init=snake_case , ) for _ in range(1_0_0_0 ): UpperCAmelCase : str = criterion(snake_case , snake_case ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = nn.Linear(50 , 50 ) if is_torch_available() else None SCREAMING_SNAKE_CASE__ : List[Any] = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None SCREAMING_SNAKE_CASE__ : Optional[int] = 10 def A_ ( self , snake_case , snake_case , snake_case , snake_case=None ): '''simple docstring''' self.assertEqual(len(snake_case ) , len(snake_case ) ) for a, b in zip(snake_case , snake_case ): self.assertAlmostEqual(snake_case , snake_case , delta=snake_case , msg=snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : int = {"num_warmup_steps": 2, "num_training_steps": 1_0} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) UpperCAmelCase : int = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {"num_warmup_steps": 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, "num_cycles": 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, "power": 2.0, "lr_end": 1e-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {"num_warmup_steps": 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): UpperCAmelCase , UpperCAmelCase : Any = data UpperCAmelCase : Tuple = scheduler_func(self.optimizer , **snake_case ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) UpperCAmelCase : List[str] = unwrap_schedule(snake_case , self.num_steps ) self.assertListAlmostEqual( snake_case , snake_case , tol=1e-2 , msg=f"failed for {scheduler_func} in normal scheduler" , ) UpperCAmelCase : Optional[Any] = scheduler_func(self.optimizer , **snake_case ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(snake_case ) # wrap to test picklability of the schedule UpperCAmelCase : Tuple = unwrap_and_save_reload_schedule(snake_case , self.num_steps ) self.assertListEqual(snake_case , snake_case , msg=f"failed for {scheduler_func} in save and reload" ) class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case ): '''simple docstring''' UpperCAmelCase : List[str] = fn def __call__( self , *snake_case , **snake_case ): '''simple docstring''' return self.fn(*snake_case , **snake_case ) @classmethod def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : Optional[int] = list(map(self , scheduler.lr_lambdas ) )
311
'''simple docstring''' import argparse from collections import defaultdict import yaml a : str = "docs/source/en/_toctree.yml" def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = defaultdict(__magic_name__ ) for doc in model_doc: counts[doc["local"]] += 1 UpperCAmelCase : List[Any] = [key for key, value in counts.items() if value > 1] UpperCAmelCase : Dict = [] for duplicate_key in duplicates: UpperCAmelCase : Union[str, Any] = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key} ) if len(__magic_name__ ) > 1: raise ValueError( F"{duplicate_key} is present several times in the documentation table of content at " "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the " "others." ) # Only add this once new_doc.append({"local": duplicate_key, "title": titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1] ) # Sort return sorted(__magic_name__ , key=lambda __magic_name__ : s["title"].lower() ) def lowercase ( __magic_name__=False ): '''simple docstring''' with open(__magic_name__ , encoding="utf-8" ) as f: UpperCAmelCase : Any = yaml.safe_load(f.read() ) # Get to the API doc UpperCAmelCase : Optional[int] = 0 while content[api_idx]["title"] != "API": api_idx += 1 UpperCAmelCase : Union[str, Any] = content[api_idx]["sections"] # Then to the model doc UpperCAmelCase : Any = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 UpperCAmelCase : str = api_doc[model_idx]["sections"] UpperCAmelCase : Any = [(idx, section) for idx, section in enumerate(__magic_name__ ) if "sections" in section] UpperCAmelCase : Optional[int] = False for idx, modality_doc in modalities_docs: UpperCAmelCase : int = modality_doc["sections"] UpperCAmelCase : int = clean_model_doc_toc(__magic_name__ ) if old_modality_doc != new_modality_doc: UpperCAmelCase : int = True if overwrite: UpperCAmelCase : Dict = new_modality_doc if diff: if overwrite: UpperCAmelCase : Any = model_doc UpperCAmelCase : Any = api_doc with open(__magic_name__ , "w" , encoding="utf-8" ) as f: f.write(yaml.dump(__magic_name__ , allow_unicode=__magic_name__ ) ) else: raise ValueError( "The model doc part of the table of content is not properly sorted, run `make style` to fix this." ) if __name__ == "__main__": a : Optional[Any] = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") a : Optional[Any] = parser.parse_args() check_model_doc(args.fix_and_overwrite)
311
1
'''simple docstring''' import warnings from ...utils import logging from .image_processing_chinese_clip import ChineseCLIPImageProcessor a : str = logging.get_logger(__name__) class UpperCamelCase__ ( lowercase__ ): """simple docstring""" def __init__( self , *snake_case , **snake_case ): '''simple docstring''' warnings.warn( "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use ChineseCLIPImageProcessor instead." , snake_case , ) super().__init__(*snake_case , **snake_case )
311
'''simple docstring''' from datetime import datetime import matplotlib.pyplot as plt import torch def lowercase ( __magic_name__ ): '''simple docstring''' for param in module.parameters(): UpperCAmelCase : Any = False def lowercase ( ): '''simple docstring''' UpperCAmelCase : int = "cuda" if torch.cuda.is_available() else "cpu" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): UpperCAmelCase : int = "mps" if device == "mps": print( "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch" " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues" " with generations." ) return device def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : str = plt.imshow(__magic_name__ ) fig.axes.get_xaxis().set_visible(__magic_name__ ) fig.axes.get_yaxis().set_visible(__magic_name__ ) plt.show() def lowercase ( ): '''simple docstring''' UpperCAmelCase : str = datetime.now() UpperCAmelCase : Tuple = current_time.strftime("%H:%M:%S" ) return timestamp
311
1
'''simple docstring''' from math import factorial, pi def lowercase ( __magic_name__ , __magic_name__ = 30 ): '''simple docstring''' if not isinstance(__magic_name__ , (int, float) ): raise ValueError("maclaurin_sin() requires either an int or float for theta" ) if not isinstance(__magic_name__ , __magic_name__ ) or accuracy <= 0: raise ValueError("maclaurin_sin() requires a positive int for accuracy" ) UpperCAmelCase : str = float(__magic_name__ ) UpperCAmelCase : int = theta // (2 * pi) theta -= 2 * div * pi return sum( (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(__magic_name__ ) ) def lowercase ( __magic_name__ , __magic_name__ = 30 ): '''simple docstring''' if not isinstance(__magic_name__ , (int, float) ): raise ValueError("maclaurin_cos() requires either an int or float for theta" ) if not isinstance(__magic_name__ , __magic_name__ ) or accuracy <= 0: raise ValueError("maclaurin_cos() requires a positive int for accuracy" ) UpperCAmelCase : int = float(__magic_name__ ) UpperCAmelCase : Optional[Any] = theta // (2 * pi) theta -= 2 * div * pi return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(__magic_name__ ) ) if __name__ == "__main__": import doctest doctest.testmod() print(maclaurin_sin(10)) print(maclaurin_sin(-10)) print(maclaurin_sin(10, 15)) print(maclaurin_sin(-10, 15)) print(maclaurin_cos(5)) print(maclaurin_cos(-5)) print(maclaurin_cos(10, 15)) print(maclaurin_cos(-10, 15))
311
'''simple docstring''' import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import ( SeqaSeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) a : str = getLogger(__name__) def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = 8 , __magic_name__ = 1024 , __magic_name__="val" , __magic_name__=None , __magic_name__=False , __magic_name__="summarization" , __magic_name__=None , __magic_name__=1 , __magic_name__ = None , __magic_name__="" , **__magic_name__ , ): '''simple docstring''' UpperCAmelCase : List[Any] = str(__magic_name__ ) assert local_rank is not None torch.distributed.init_process_group(backend="nccl" , rank=__magic_name__ ) UpperCAmelCase : List[str] = Path(__magic_name__ ) UpperCAmelCase : Dict = save_dir.joinpath(F"rank_{local_rank}_output.json" ) torch.cuda.set_device(__magic_name__ ) UpperCAmelCase : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(__magic_name__ ).cuda() if fpaa: UpperCAmelCase : int = model.half() # determine if we need to increase num_beams use_task_specific_params(__magic_name__ , __magic_name__ ) # update config with task specific params UpperCAmelCase : Dict = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk? if num_return_sequences > num_beams: UpperCAmelCase : Optional[Any] = num_return_sequences UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(__magic_name__ ) logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type. if max_source_length is None: UpperCAmelCase : Any = tokenizer.model_max_length if prefix is None: UpperCAmelCase : Tuple = prefix or getattr(model.config , "prefix" , "" ) or "" UpperCAmelCase : Dict = SeqaSeqDataset( __magic_name__ , __magic_name__ , __magic_name__ , max_target_length=1024 , type_path=__magic_name__ , n_obs=__magic_name__ , prefix=__magic_name__ , **__magic_name__ , ) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. UpperCAmelCase : int = ds.make_sortish_sampler(__magic_name__ , distributed=__magic_name__ , add_extra_examples=__magic_name__ , shuffle=__magic_name__ ) UpperCAmelCase : List[Any] = DataLoader(__magic_name__ , sampler=__magic_name__ , batch_size=__magic_name__ , collate_fn=ds.collate_fn ) UpperCAmelCase : Any = [] for batch in tqdm(__magic_name__ ): UpperCAmelCase : List[Any] = model.generate( input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=__magic_name__ , num_beams=__magic_name__ , **__magic_name__ , ) UpperCAmelCase : Optional[int] = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ ) UpperCAmelCase : int = batch["ids"] if num_return_sequences > 1: UpperCAmelCase : List[Any] = chunks(__magic_name__ , __magic_name__ ) # batch size chunks, each of size num_return_seq for i, pred in enumerate(__magic_name__ ): results.append({"pred": pred, "id": ids[i].item()} ) save_json(__magic_name__ , __magic_name__ ) return results, sampler.num_replicas def lowercase ( ): '''simple docstring''' UpperCAmelCase : str = argparse.ArgumentParser( epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" ) parser.add_argument("--data_dir" , type=__magic_name__ , help="like cnn_dm/test.source" ) parser.add_argument( "--model_name" , type=__magic_name__ , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , ) parser.add_argument("--save_dir" , type=__magic_name__ , help="where to save" , default="tmp_gen" ) parser.add_argument("--max_source_length" , type=__magic_name__ , default=__magic_name__ ) parser.add_argument( "--type_path" , type=__magic_name__ , default="test" , help="which subset to evaluate typically train/val/test" ) parser.add_argument("--task" , type=__magic_name__ , default="summarization" , help="used for task_specific_params + metrics" ) parser.add_argument("--bs" , type=__magic_name__ , default=8 , required=__magic_name__ , help="batch size" ) parser.add_argument( "--local_rank" , type=__magic_name__ , default=-1 , required=__magic_name__ , help="should be passed by distributed.launch" ) parser.add_argument( "--n_obs" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ , help="How many observations. Defaults to all." ) parser.add_argument( "--num_return_sequences" , type=__magic_name__ , default=1 , required=__magic_name__ , help="How many sequences to return" ) parser.add_argument( "--sync_timeout" , type=__magic_name__ , default=600 , required=__magic_name__ , help="How long should master process wait for other processes to finish." , ) parser.add_argument("--src_lang" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ ) parser.add_argument("--tgt_lang" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ ) parser.add_argument( "--prefix" , type=__magic_name__ , required=__magic_name__ , default=__magic_name__ , help="will be added to the begininng of src examples" ) parser.add_argument("--fp16" , action="store_true" ) parser.add_argument("--debug" , action="store_true" ) UpperCAmelCase : Union[str, Any] = time.time() UpperCAmelCase , UpperCAmelCase : Dict = parser.parse_known_args() UpperCAmelCase : Tuple = parse_numeric_n_bool_cl_kwargs(__magic_name__ ) if generate_kwargs and args.local_rank <= 0: print(F"parsed the following generate kwargs: {generate_kwargs}" ) UpperCAmelCase : Union[str, Any] = Path(args.save_dir + "_tmp" ) Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) # this handles locking. UpperCAmelCase : List[Any] = list(json_save_dir.glob("rank_*.json" ) ) if intermediate_files: raise ValueError(F"Found files at {json_save_dir} please move or remove them." ) # In theory, a node could finish and save before another node hits this. If this happens, we can address later. UpperCAmelCase : Optional[Any] = {} if args.src_lang is not None: UpperCAmelCase : List[str] = args.src_lang if args.tgt_lang is not None: UpperCAmelCase : Dict = args.tgt_lang Path(args.save_dir ).mkdir(exist_ok=__magic_name__ ) UpperCAmelCase , UpperCAmelCase : str = eval_data_dir( args.data_dir , __magic_name__ , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=__magic_name__ , **__magic_name__ , ) if args.local_rank <= 0: UpperCAmelCase : List[str] = Path(args.save_dir ) save_dir.mkdir(exist_ok=__magic_name__ ) UpperCAmelCase : str = gather_results_from_each_node(__magic_name__ , __magic_name__ , args.sync_timeout ) UpperCAmelCase : Dict = combine_partial_results(__magic_name__ ) if args.num_return_sequences > 1: UpperCAmelCase : int = save_dir.joinpath("pseudolabel_results.json" ) print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" ) save_json(__magic_name__ , __magic_name__ ) return UpperCAmelCase : Dict = Path(args.data_dir ).joinpath(args.type_path + ".target" ) with open(__magic_name__ ) as f: UpperCAmelCase : Dict = [x.rstrip() for x in f.readlines()][: len(__magic_name__ )] # Calculate metrics, save metrics, and save _generations.txt UpperCAmelCase : Optional[int] = "translation" in args.task UpperCAmelCase : str = calculate_bleu if calc_bleu else calculate_rouge UpperCAmelCase : Tuple = "bleu" if calc_bleu else "rouge" UpperCAmelCase : Dict = score_fn(__magic_name__ , __magic_name__ ) UpperCAmelCase : Any = len(__magic_name__ ) UpperCAmelCase : Union[str, Any] = time.time() - start_time UpperCAmelCase : Dict = round(runtime / metrics["n_obs"] , 4 ) UpperCAmelCase : Optional[Any] = num_replicas # TODO(@stas00): add whatever metadata to metrics UpperCAmelCase : Dict = save_dir.joinpath(F"{args.type_path}_{metric_name}.json" ) save_json(__magic_name__ , __magic_name__ , indent=__magic_name__ ) print(__magic_name__ ) write_txt_file(__magic_name__ , save_dir.joinpath(F"{args.type_path}_generations.txt" ) ) if args.debug: write_txt_file(__magic_name__ , save_dir.joinpath(F"{args.type_path}.target" ) ) else: shutil.rmtree(__magic_name__ ) def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Tuple = [] for partial_result in partial_results: records.extend(__magic_name__ ) UpperCAmelCase : Optional[Any] = sorted(__magic_name__ , key=lambda __magic_name__ : x["id"] ) UpperCAmelCase : List[Any] = [x["pred"] for x in records] return preds def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = time.time() logger.info("waiting for all nodes to finish" ) UpperCAmelCase : Union[str, Any] = None while (time.time() - start_wait) < timeout: UpperCAmelCase : Dict = list(save_dir.glob("rank_*.json" ) ) if len(__magic_name__ ) < num_replicas: continue try: # make sure all json files are fully saved UpperCAmelCase : List[str] = lmap(__magic_name__ , __magic_name__ ) return json_data except JSONDecodeError: continue else: raise TimeoutError("Rank 0 gave up on waiting for other processes" ) # Unreachable if __name__ == "__main__": # Usage for MT: run_generate()
311
1
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case , snake_case=1_3 , snake_case=7 , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=9_9 , snake_case=3_2 , snake_case=2 , snake_case=4 , snake_case=3_7 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=5_1_2 , snake_case=1_6 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , ): '''simple docstring''' UpperCAmelCase : Dict = parent UpperCAmelCase : Optional[int] = 1_3 UpperCAmelCase : Any = 7 UpperCAmelCase : Any = True UpperCAmelCase : List[Any] = True UpperCAmelCase : Optional[Any] = True UpperCAmelCase : Optional[Any] = True UpperCAmelCase : str = 9_9 UpperCAmelCase : List[Any] = 3_8_4 UpperCAmelCase : Union[str, Any] = 2 UpperCAmelCase : Optional[Any] = 4 UpperCAmelCase : str = 3_7 UpperCAmelCase : int = "gelu" UpperCAmelCase : Any = 0.1 UpperCAmelCase : Any = 0.1 UpperCAmelCase : List[str] = 5_1_2 UpperCAmelCase : Dict = 1_6 UpperCAmelCase : Any = 2 UpperCAmelCase : Any = 0.02 UpperCAmelCase : Optional[int] = 3 UpperCAmelCase : Dict = 4 UpperCAmelCase : Any = 1_2_8 UpperCAmelCase : Optional[Any] = 2 UpperCAmelCase : int = 9 UpperCAmelCase : int = 1 UpperCAmelCase : Tuple = None def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : Optional[int] = None if self.use_input_mask: UpperCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase : Optional[int] = None if self.use_token_type_ids: UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase : str = None UpperCAmelCase : List[Any] = None UpperCAmelCase : Optional[Any] = None if self.use_labels: UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase : str = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase : Dict = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=snake_case , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Tuple = TFConvBertModel(config=snake_case ) UpperCAmelCase : str = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} UpperCAmelCase : Union[str, Any] = [input_ids, input_mask] UpperCAmelCase : Optional[Any] = model(snake_case ) UpperCAmelCase : Union[str, Any] = model(snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : List[str] = TFConvBertForMaskedLM(config=snake_case ) UpperCAmelCase : str = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } UpperCAmelCase : Optional[int] = model(snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Optional[int] = self.num_labels UpperCAmelCase : List[Any] = TFConvBertForSequenceClassification(config=snake_case ) UpperCAmelCase : Union[str, Any] = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } UpperCAmelCase : Any = model(snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : List[str] = self.num_choices UpperCAmelCase : int = TFConvBertForMultipleChoice(config=snake_case ) UpperCAmelCase : List[Any] = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) ) UpperCAmelCase : str = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) ) UpperCAmelCase : Any = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) ) UpperCAmelCase : Tuple = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } UpperCAmelCase : Tuple = model(snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Dict = self.num_labels UpperCAmelCase : Any = TFConvBertForTokenClassification(config=snake_case ) UpperCAmelCase : int = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } UpperCAmelCase : Tuple = model(snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Dict = TFConvBertForQuestionAnswering(config=snake_case ) UpperCAmelCase : Optional[int] = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } UpperCAmelCase : str = model(snake_case ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs() ( ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ) : str = config_and_inputs UpperCAmelCase : Optional[int] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class UpperCamelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) SCREAMING_SNAKE_CASE__ : Tuple = ( { "feature-extraction": TFConvBertModel, "fill-mask": TFConvBertForMaskedLM, "question-answering": TFConvBertForQuestionAnswering, "text-classification": TFConvBertForSequenceClassification, "token-classification": TFConvBertForTokenClassification, "zero-shot": TFConvBertForSequenceClassification, } if is_tf_available() else {} ) SCREAMING_SNAKE_CASE__ : Optional[Any] = False SCREAMING_SNAKE_CASE__ : Any = False SCREAMING_SNAKE_CASE__ : List[Any] = False def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[Any] = TFConvBertModelTester(self ) UpperCAmelCase : Optional[int] = ConfigTester(self , config_class=snake_case , hidden_size=3_7 ) def A_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() def A_ ( self ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*snake_case ) @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : int = True UpperCAmelCase : Tuple = True if hasattr(snake_case , "use_cache" ): UpperCAmelCase : Optional[Any] = True UpperCAmelCase : Union[str, Any] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length ) UpperCAmelCase : Optional[Any] = getattr(self.model_tester , "key_length" , snake_case ) for model_class in self.all_model_classes: UpperCAmelCase : Optional[Any] = self._prepare_for_class(snake_case , snake_case ) UpperCAmelCase : List[str] = model_class(snake_case ) UpperCAmelCase : Dict = len(model(snake_case ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(snake_case , saved_model=snake_case ) UpperCAmelCase : List[Any] = os.path.join(snake_case , "saved_model" , "1" ) UpperCAmelCase : int = tf.keras.models.load_model(snake_case ) UpperCAmelCase : Optional[int] = model(snake_case ) if self.is_encoder_decoder: UpperCAmelCase : str = outputs["encoder_hidden_states"] UpperCAmelCase : str = outputs["encoder_attentions"] else: UpperCAmelCase : Dict = outputs["hidden_states"] UpperCAmelCase : str = outputs["attentions"] self.assertEqual(len(snake_case ) , snake_case ) UpperCAmelCase : List[Any] = getattr( self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(snake_case ) , snake_case ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(snake_case ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) self.assertIsNotNone(snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : str = True UpperCAmelCase : Optional[int] = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length ) UpperCAmelCase : List[str] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length ) UpperCAmelCase : int = getattr(self.model_tester , "key_length" , snake_case ) UpperCAmelCase : int = getattr(self.model_tester , "key_length" , snake_case ) def check_decoder_attentions_output(snake_case ): UpperCAmelCase : Any = len(snake_case ) self.assertEqual(out_len % 2 , 0 ) UpperCAmelCase : List[str] = outputs.decoder_attentions self.assertEqual(len(snake_case ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(snake_case ): UpperCAmelCase : Any = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(snake_case ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: UpperCAmelCase : List[str] = True UpperCAmelCase : Tuple = False UpperCAmelCase : Optional[Any] = model_class(snake_case ) UpperCAmelCase : int = model(self._prepare_for_class(snake_case , snake_case ) ) UpperCAmelCase : Optional[Any] = len(snake_case ) self.assertEqual(config.output_hidden_states , snake_case ) check_encoder_attentions_output(snake_case ) if self.is_encoder_decoder: UpperCAmelCase : int = model_class(snake_case ) UpperCAmelCase : Tuple = model(self._prepare_for_class(snake_case , snake_case ) ) self.assertEqual(config.output_hidden_states , snake_case ) check_decoder_attentions_output(snake_case ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] UpperCAmelCase : Tuple = True UpperCAmelCase : Optional[Any] = model_class(snake_case ) UpperCAmelCase : Union[str, Any] = model(self._prepare_for_class(snake_case , snake_case ) ) self.assertEqual(config.output_hidden_states , snake_case ) check_encoder_attentions_output(snake_case ) # Check attention is always last and order is fine UpperCAmelCase : Any = True UpperCAmelCase : Optional[Any] = True UpperCAmelCase : List[str] = model_class(snake_case ) UpperCAmelCase : Tuple = model(self._prepare_for_class(snake_case , snake_case ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(snake_case ) ) self.assertEqual(model.config.output_hidden_states , snake_case ) check_encoder_attentions_output(snake_case ) @require_tf class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) UpperCAmelCase : str = tf.constant([[0, 1, 2, 3, 4, 5]] ) UpperCAmelCase : int = model(snake_case )[0] UpperCAmelCase : Tuple = [1, 6, 7_6_8] self.assertEqual(output.shape , snake_case ) UpperCAmelCase : Optional[Any] = tf.constant( [ [ [-0.0347_5493, -0.468_6034, -0.3063_8832], [0.2263_7248, -0.2698_8646, -0.742_3424], [0.1032_4868, -0.4501_3508, -0.5828_0784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , snake_case , atol=1e-4 )
311
'''simple docstring''' import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() a : List[str] = logging.get_logger(__name__) a : Optional[Any] = ["model.decoder.embed_positions.weights"] def lowercase ( __magic_name__ ): '''simple docstring''' if "emb" in name: UpperCAmelCase : str = name.replace("emb" , "model.decoder.embed_tokens" ) if "transformer" in name: UpperCAmelCase : List[str] = name.replace("transformer" , "model.decoder" ) if "cross_attention" in name: UpperCAmelCase : int = name.replace("cross_attention" , "encoder_attn" ) if "linear1" in name: UpperCAmelCase : List[Any] = name.replace("linear1" , "fc1" ) if "linear2" in name: UpperCAmelCase : int = name.replace("linear2" , "fc2" ) if "norm1" in name: UpperCAmelCase : Dict = name.replace("norm1" , "self_attn_layer_norm" ) if "norm_cross" in name: UpperCAmelCase : Any = name.replace("norm_cross" , "encoder_attn_layer_norm" ) if "norm2" in name: UpperCAmelCase : Union[str, Any] = name.replace("norm2" , "final_layer_norm" ) if "out_norm" in name: UpperCAmelCase : Dict = name.replace("out_norm" , "model.decoder.layer_norm" ) if "linears" in name: UpperCAmelCase : List[Any] = name.replace("linears" , "lm_heads" ) if "condition_provider.conditioners.description.output_proj" in name: UpperCAmelCase : Any = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" ) return name def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Any = list(state_dict.keys() ) UpperCAmelCase : List[Any] = {} for key in keys: UpperCAmelCase : Any = state_dict.pop(__magic_name__ ) UpperCAmelCase : str = rename_keys(__magic_name__ ) if "in_proj_weight" in key: # split fused qkv proj UpperCAmelCase : Optional[int] = val[:hidden_size, :] UpperCAmelCase : Optional[Any] = val[hidden_size : 2 * hidden_size, :] UpperCAmelCase : Optional[Any] = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: UpperCAmelCase : str = val else: UpperCAmelCase : int = val return state_dict, enc_dec_proj_state_dict def lowercase ( __magic_name__ ): '''simple docstring''' if checkpoint == "small": # default config values UpperCAmelCase : List[Any] = 1024 UpperCAmelCase : Tuple = 24 UpperCAmelCase : Union[str, Any] = 16 elif checkpoint == "medium": UpperCAmelCase : List[Any] = 1536 UpperCAmelCase : Optional[Any] = 48 UpperCAmelCase : List[str] = 24 elif checkpoint == "large": UpperCAmelCase : List[Any] = 2048 UpperCAmelCase : str = 48 UpperCAmelCase : Optional[Any] = 32 else: raise ValueError(F"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." ) UpperCAmelCase : Tuple = MusicgenDecoderConfig( hidden_size=__magic_name__ , ffn_dim=hidden_size * 4 , num_hidden_layers=__magic_name__ , num_attention_heads=__magic_name__ , ) return config @torch.no_grad() def lowercase ( __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__="cpu" ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = MusicGen.get_pretrained(__magic_name__ , device=__magic_name__ ) UpperCAmelCase : List[str] = decoder_config_from_checkpoint(__magic_name__ ) UpperCAmelCase : Dict = fairseq_model.lm.state_dict() UpperCAmelCase , UpperCAmelCase : List[str] = rename_state_dict( __magic_name__ , hidden_size=decoder_config.hidden_size ) UpperCAmelCase : Any = TaEncoderModel.from_pretrained("t5-base" ) UpperCAmelCase : Any = EncodecModel.from_pretrained("facebook/encodec_32khz" ) UpperCAmelCase : int = MusicgenForCausalLM(__magic_name__ ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection UpperCAmelCase , UpperCAmelCase : Optional[int] = decoder.load_state_dict(__magic_name__ , strict=__magic_name__ ) for key in missing_keys.copy(): if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(__magic_name__ ) if len(__magic_name__ ) > 0: raise ValueError(F"Missing key(s) in state_dict: {missing_keys}" ) if len(__magic_name__ ) > 0: raise ValueError(F"Unexpected key(s) in state_dict: {unexpected_keys}" ) # init the composite model UpperCAmelCase : List[Any] = MusicgenForConditionalGeneration(text_encoder=__magic_name__ , audio_encoder=__magic_name__ , decoder=__magic_name__ ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(__magic_name__ ) # check we can do a forward pass UpperCAmelCase : Union[str, Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) UpperCAmelCase : Optional[Any] = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): UpperCAmelCase : str = model(input_ids=__magic_name__ , decoder_input_ids=__magic_name__ ).logits if logits.shape != (8, 1, 2048): raise ValueError("Incorrect shape for logits" ) # now construct the processor UpperCAmelCase : Dict = AutoTokenizer.from_pretrained("t5-base" ) UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" ) UpperCAmelCase : Dict = MusicgenProcessor(feature_extractor=__magic_name__ , tokenizer=__magic_name__ ) # set the appropriate bos/pad token ids UpperCAmelCase : List[Any] = 2048 UpperCAmelCase : Tuple = 2048 # set other default generation config params UpperCAmelCase : Tuple = int(30 * audio_encoder.config.frame_rate ) UpperCAmelCase : str = True UpperCAmelCase : Tuple = 3.0 if pytorch_dump_folder is not None: Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) logger.info(F"Saving model {checkpoint} to {pytorch_dump_folder}" ) model.save_pretrained(__magic_name__ ) processor.save_pretrained(__magic_name__ ) if repo_id: logger.info(F"Pushing model {checkpoint} to {repo_id}" ) model.push_to_hub(__magic_name__ ) processor.push_to_hub(__magic_name__ ) if __name__ == "__main__": a : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint", default="small", type=str, help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.", ) parser.add_argument( "--pytorch_dump_folder", required=True, default=None, type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) parser.add_argument( "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda." ) a : int = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
311
1
'''simple docstring''' import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import ( SeqaSeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) a : str = getLogger(__name__) def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = 8 , __magic_name__ = 1024 , __magic_name__="val" , __magic_name__=None , __magic_name__=False , __magic_name__="summarization" , __magic_name__=None , __magic_name__=1 , __magic_name__ = None , __magic_name__="" , **__magic_name__ , ): '''simple docstring''' UpperCAmelCase : List[Any] = str(__magic_name__ ) assert local_rank is not None torch.distributed.init_process_group(backend="nccl" , rank=__magic_name__ ) UpperCAmelCase : List[str] = Path(__magic_name__ ) UpperCAmelCase : Dict = save_dir.joinpath(F"rank_{local_rank}_output.json" ) torch.cuda.set_device(__magic_name__ ) UpperCAmelCase : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(__magic_name__ ).cuda() if fpaa: UpperCAmelCase : int = model.half() # determine if we need to increase num_beams use_task_specific_params(__magic_name__ , __magic_name__ ) # update config with task specific params UpperCAmelCase : Dict = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk? if num_return_sequences > num_beams: UpperCAmelCase : Optional[Any] = num_return_sequences UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(__magic_name__ ) logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type. if max_source_length is None: UpperCAmelCase : Any = tokenizer.model_max_length if prefix is None: UpperCAmelCase : Tuple = prefix or getattr(model.config , "prefix" , "" ) or "" UpperCAmelCase : Dict = SeqaSeqDataset( __magic_name__ , __magic_name__ , __magic_name__ , max_target_length=1024 , type_path=__magic_name__ , n_obs=__magic_name__ , prefix=__magic_name__ , **__magic_name__ , ) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. UpperCAmelCase : int = ds.make_sortish_sampler(__magic_name__ , distributed=__magic_name__ , add_extra_examples=__magic_name__ , shuffle=__magic_name__ ) UpperCAmelCase : List[Any] = DataLoader(__magic_name__ , sampler=__magic_name__ , batch_size=__magic_name__ , collate_fn=ds.collate_fn ) UpperCAmelCase : Any = [] for batch in tqdm(__magic_name__ ): UpperCAmelCase : List[Any] = model.generate( input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=__magic_name__ , num_beams=__magic_name__ , **__magic_name__ , ) UpperCAmelCase : Optional[int] = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ ) UpperCAmelCase : int = batch["ids"] if num_return_sequences > 1: UpperCAmelCase : List[Any] = chunks(__magic_name__ , __magic_name__ ) # batch size chunks, each of size num_return_seq for i, pred in enumerate(__magic_name__ ): results.append({"pred": pred, "id": ids[i].item()} ) save_json(__magic_name__ , __magic_name__ ) return results, sampler.num_replicas def lowercase ( ): '''simple docstring''' UpperCAmelCase : str = argparse.ArgumentParser( epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" ) parser.add_argument("--data_dir" , type=__magic_name__ , help="like cnn_dm/test.source" ) parser.add_argument( "--model_name" , type=__magic_name__ , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , ) parser.add_argument("--save_dir" , type=__magic_name__ , help="where to save" , default="tmp_gen" ) parser.add_argument("--max_source_length" , type=__magic_name__ , default=__magic_name__ ) parser.add_argument( "--type_path" , type=__magic_name__ , default="test" , help="which subset to evaluate typically train/val/test" ) parser.add_argument("--task" , type=__magic_name__ , default="summarization" , help="used for task_specific_params + metrics" ) parser.add_argument("--bs" , type=__magic_name__ , default=8 , required=__magic_name__ , help="batch size" ) parser.add_argument( "--local_rank" , type=__magic_name__ , default=-1 , required=__magic_name__ , help="should be passed by distributed.launch" ) parser.add_argument( "--n_obs" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ , help="How many observations. Defaults to all." ) parser.add_argument( "--num_return_sequences" , type=__magic_name__ , default=1 , required=__magic_name__ , help="How many sequences to return" ) parser.add_argument( "--sync_timeout" , type=__magic_name__ , default=600 , required=__magic_name__ , help="How long should master process wait for other processes to finish." , ) parser.add_argument("--src_lang" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ ) parser.add_argument("--tgt_lang" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ ) parser.add_argument( "--prefix" , type=__magic_name__ , required=__magic_name__ , default=__magic_name__ , help="will be added to the begininng of src examples" ) parser.add_argument("--fp16" , action="store_true" ) parser.add_argument("--debug" , action="store_true" ) UpperCAmelCase : Union[str, Any] = time.time() UpperCAmelCase , UpperCAmelCase : Dict = parser.parse_known_args() UpperCAmelCase : Tuple = parse_numeric_n_bool_cl_kwargs(__magic_name__ ) if generate_kwargs and args.local_rank <= 0: print(F"parsed the following generate kwargs: {generate_kwargs}" ) UpperCAmelCase : Union[str, Any] = Path(args.save_dir + "_tmp" ) Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) # this handles locking. UpperCAmelCase : List[Any] = list(json_save_dir.glob("rank_*.json" ) ) if intermediate_files: raise ValueError(F"Found files at {json_save_dir} please move or remove them." ) # In theory, a node could finish and save before another node hits this. If this happens, we can address later. UpperCAmelCase : Optional[Any] = {} if args.src_lang is not None: UpperCAmelCase : List[str] = args.src_lang if args.tgt_lang is not None: UpperCAmelCase : Dict = args.tgt_lang Path(args.save_dir ).mkdir(exist_ok=__magic_name__ ) UpperCAmelCase , UpperCAmelCase : str = eval_data_dir( args.data_dir , __magic_name__ , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=__magic_name__ , **__magic_name__ , ) if args.local_rank <= 0: UpperCAmelCase : List[str] = Path(args.save_dir ) save_dir.mkdir(exist_ok=__magic_name__ ) UpperCAmelCase : str = gather_results_from_each_node(__magic_name__ , __magic_name__ , args.sync_timeout ) UpperCAmelCase : Dict = combine_partial_results(__magic_name__ ) if args.num_return_sequences > 1: UpperCAmelCase : int = save_dir.joinpath("pseudolabel_results.json" ) print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" ) save_json(__magic_name__ , __magic_name__ ) return UpperCAmelCase : Dict = Path(args.data_dir ).joinpath(args.type_path + ".target" ) with open(__magic_name__ ) as f: UpperCAmelCase : Dict = [x.rstrip() for x in f.readlines()][: len(__magic_name__ )] # Calculate metrics, save metrics, and save _generations.txt UpperCAmelCase : Optional[int] = "translation" in args.task UpperCAmelCase : str = calculate_bleu if calc_bleu else calculate_rouge UpperCAmelCase : Tuple = "bleu" if calc_bleu else "rouge" UpperCAmelCase : Dict = score_fn(__magic_name__ , __magic_name__ ) UpperCAmelCase : Any = len(__magic_name__ ) UpperCAmelCase : Union[str, Any] = time.time() - start_time UpperCAmelCase : Dict = round(runtime / metrics["n_obs"] , 4 ) UpperCAmelCase : Optional[Any] = num_replicas # TODO(@stas00): add whatever metadata to metrics UpperCAmelCase : Dict = save_dir.joinpath(F"{args.type_path}_{metric_name}.json" ) save_json(__magic_name__ , __magic_name__ , indent=__magic_name__ ) print(__magic_name__ ) write_txt_file(__magic_name__ , save_dir.joinpath(F"{args.type_path}_generations.txt" ) ) if args.debug: write_txt_file(__magic_name__ , save_dir.joinpath(F"{args.type_path}.target" ) ) else: shutil.rmtree(__magic_name__ ) def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Tuple = [] for partial_result in partial_results: records.extend(__magic_name__ ) UpperCAmelCase : Optional[Any] = sorted(__magic_name__ , key=lambda __magic_name__ : x["id"] ) UpperCAmelCase : List[Any] = [x["pred"] for x in records] return preds def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = time.time() logger.info("waiting for all nodes to finish" ) UpperCAmelCase : Union[str, Any] = None while (time.time() - start_wait) < timeout: UpperCAmelCase : Dict = list(save_dir.glob("rank_*.json" ) ) if len(__magic_name__ ) < num_replicas: continue try: # make sure all json files are fully saved UpperCAmelCase : List[str] = lmap(__magic_name__ , __magic_name__ ) return json_data except JSONDecodeError: continue else: raise TimeoutError("Rank 0 gave up on waiting for other processes" ) # Unreachable if __name__ == "__main__": # Usage for MT: run_generate()
311
'''simple docstring''' import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[str] = inspect.getfile(accelerate.test_utils ) UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] ) UpperCAmelCase : Optional[int] = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] ) UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] ) @require_multi_gpu def A_ ( self ): '''simple docstring''' print(f"Found {torch.cuda.device_count()} devices." ) UpperCAmelCase : Any = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case , env=os.environ.copy() ) @require_multi_gpu def A_ ( self ): '''simple docstring''' print(f"Found {torch.cuda.device_count()} devices." ) UpperCAmelCase : Tuple = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path] print(f"Command: {cmd}" ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case , env=os.environ.copy() ) @require_multi_gpu def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case , env=os.environ.copy() ) @require_multi_gpu def A_ ( self ): '''simple docstring''' print(f"Found {torch.cuda.device_count()} devices, using 2 devices only" ) UpperCAmelCase : str = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ): execute_subprocess_async(snake_case , env=os.environ.copy() ) if __name__ == "__main__": a : Union[str, Any] = Accelerator() a : str = (accelerator.state.process_index + 2, 10) a : List[str] = torch.randint(0, 10, shape).to(accelerator.device) a : Optional[int] = "" a : int = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." a : List[Any] = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." a : List[str] = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
311
1
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , ): '''simple docstring''' UpperCAmelCase : Any = {} if train_file is not None: UpperCAmelCase : int = [train_file] if eval_file is not None: UpperCAmelCase : Optional[Any] = [eval_file] if test_file is not None: UpperCAmelCase : Union[str, Any] = [test_file] UpperCAmelCase : Optional[Any] = datasets.load_dataset("csv" , data_files=__magic_name__ ) UpperCAmelCase : List[str] = list(ds[list(files.keys() )[0]].features.keys() ) UpperCAmelCase : Optional[int] = features_name.pop(__magic_name__ ) UpperCAmelCase : Any = list(set(ds[list(files.keys() )[0]][label_name] ) ) UpperCAmelCase : Tuple = {label: i for i, label in enumerate(__magic_name__ )} UpperCAmelCase : Union[str, Any] = tokenizer.model_input_names UpperCAmelCase : int = {} if len(__magic_name__ ) == 1: for k in files.keys(): UpperCAmelCase : Union[str, Any] = ds[k].map( lambda __magic_name__ : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=__magic_name__ , max_length=__magic_name__ , padding="max_length" ) , batched=__magic_name__ , ) elif len(__magic_name__ ) == 2: for k in files.keys(): UpperCAmelCase : Dict = ds[k].map( lambda __magic_name__ : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=__magic_name__ , max_length=__magic_name__ , padding="max_length" , ) , batched=__magic_name__ , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: UpperCAmelCase : Dict = {k: v for k, v in ex.items() if k in input_names} UpperCAmelCase : List[str] = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: UpperCAmelCase : Tuple = {k: v for k, v in ex.items() if k in input_names} UpperCAmelCase : Optional[int] = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: UpperCAmelCase : str = {k: v for k, v in ex.items() if k in input_names} UpperCAmelCase : str = labelaid[ex[label_name]] yield (d, label) UpperCAmelCase : int = ( tf.data.Dataset.from_generator( __magic_name__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: UpperCAmelCase : Optional[int] = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) UpperCAmelCase : Any = ( tf.data.Dataset.from_generator( __magic_name__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: UpperCAmelCase : List[str] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) UpperCAmelCase : Any = ( tf.data.Dataset.from_generator( __magic_name__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: UpperCAmelCase : Tuple = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid a : Union[str, Any] = logging.getLogger(__name__) @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ : int = field(metadata={"help": "Which column contains the label"} ) SCREAMING_SNAKE_CASE__ : str = field(default=lowercase__ , metadata={"help": "The path of the training file"} ) SCREAMING_SNAKE_CASE__ : Optional[str] = field(default=lowercase__ , metadata={"help": "The path of the development file"} ) SCREAMING_SNAKE_CASE__ : Optional[str] = field(default=lowercase__ , metadata={"help": "The path of the test file"} ) SCREAMING_SNAKE_CASE__ : int = field( default=1_28 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) SCREAMING_SNAKE_CASE__ : bool = field( default=lowercase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} ) @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ : str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) SCREAMING_SNAKE_CASE__ : Optional[str] = field( default=lowercase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) SCREAMING_SNAKE_CASE__ : Optional[str] = field( default=lowercase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) SCREAMING_SNAKE_CASE__ : bool = field(default=lowercase__ , metadata={"help": "Set this flag to use fast tokenization."} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. SCREAMING_SNAKE_CASE__ : Optional[str] = field( default=lowercase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) def lowercase ( ): '''simple docstring''' UpperCAmelCase : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F"Output directory ({training_args.output_dir}) already exists and is not empty. Use" " --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , ) logger.info( F"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, " F"16-bits training: {training_args.fpaa}" ) logger.info(F"Training/evaluation parameters {training_args}" ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=__magic_name__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) UpperCAmelCase : Optional[Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(__magic_name__ ) , labelaid=__magic_name__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): UpperCAmelCase : Optional[int] = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=__magic_name__ , cache_dir=model_args.cache_dir , ) def compute_metrics(__magic_name__ ) -> Dict: UpperCAmelCase : Any = np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer UpperCAmelCase : int = TFTrainer( model=__magic_name__ , args=__magic_name__ , train_dataset=__magic_name__ , eval_dataset=__magic_name__ , compute_metrics=__magic_name__ , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation UpperCAmelCase : List[str] = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) UpperCAmelCase : Dict = trainer.evaluate() UpperCAmelCase : str = os.path.join(training_args.output_dir , "eval_results.txt" ) with open(__magic_name__ , "w" ) as writer: logger.info("***** Eval results *****" ) for key, value in result.items(): logger.info(F" {key} = {value}" ) writer.write(F"{key} = {value}\n" ) results.update(__magic_name__ ) return results if __name__ == "__main__": main()
311
'''simple docstring''' import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class UpperCamelCase__ : """simple docstring""" @staticmethod def A_ ( *snake_case , **snake_case ): '''simple docstring''' pass @is_pipeline_test @require_vision @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def A_ ( self , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : str = pipeline( "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" ) UpperCAmelCase : Union[str, Any] = [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ] return object_detector, examples def A_ ( self , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : List[Any] = object_detector(examples[0] , threshold=0.0 ) UpperCAmelCase : Dict = len(snake_case ) self.assertGreater(snake_case , 0 ) self.assertEqual( snake_case , [ { "score": ANY(snake_case ), "label": ANY(snake_case ), "box": {"xmin": ANY(snake_case ), "ymin": ANY(snake_case ), "xmax": ANY(snake_case ), "ymax": ANY(snake_case )}, } for i in range(snake_case ) ] , ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF" ) def A_ ( self ): '''simple docstring''' pass @require_torch def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = pipeline( "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" ) UpperCAmelCase : Optional[Any] = object_detector( "./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ {"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}}, {"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}}, {"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}}, ] , ) UpperCAmelCase : Tuple = object_detector( [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ] , threshold=0.64 , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ [ {"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}}, {"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}}, {"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}}, ] ] , ) @require_torch @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = pipeline("zero-shot-object-detection" ) UpperCAmelCase : Optional[int] = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}}, ] , ) UpperCAmelCase : Union[str, Any] = object_detector( [ { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, ] , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}}, ], [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}}, ], ] , ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF" ) def A_ ( self ): '''simple docstring''' pass @require_torch @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = 0.2 UpperCAmelCase : Union[str, Any] = pipeline("zero-shot-object-detection" ) UpperCAmelCase : str = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=snake_case , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}}, ] , ) @require_torch @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = 2 UpperCAmelCase : Optional[Any] = pipeline("zero-shot-object-detection" ) UpperCAmelCase : List[str] = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=snake_case , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, ] , )
311
1
'''simple docstring''' import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def lowercase ( __magic_name__ ): '''simple docstring''' if ( (cp >= 0X4E00 and cp <= 0X9FFF) or (cp >= 0X3400 and cp <= 0X4DBF) # or (cp >= 0X20000 and cp <= 0X2A6DF) # or (cp >= 0X2A700 and cp <= 0X2B73F) # or (cp >= 0X2B740 and cp <= 0X2B81F) # or (cp >= 0X2B820 and cp <= 0X2CEAF) # or (cp >= 0XF900 and cp <= 0XFAFF) or (cp >= 0X2F800 and cp <= 0X2FA1F) # ): # return True return False def lowercase ( __magic_name__ ): '''simple docstring''' for char in word: UpperCAmelCase : str = ord(__magic_name__ ) if not _is_chinese_char(__magic_name__ ): return 0 return 1 def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[Any] = set() for token in tokens: UpperCAmelCase : Union[str, Any] = len(__magic_name__ ) > 1 and is_chinese(__magic_name__ ) if chinese_word: word_set.add(__magic_name__ ) UpperCAmelCase : Optional[Any] = list(__magic_name__ ) return word_list def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' if not chinese_word_set: return bert_tokens UpperCAmelCase : str = max([len(__magic_name__ ) for w in chinese_word_set] ) UpperCAmelCase : Union[str, Any] = bert_tokens UpperCAmelCase , UpperCAmelCase : Optional[Any] = 0, len(__magic_name__ ) while start < end: UpperCAmelCase : List[Any] = True if is_chinese(bert_word[start] ): UpperCAmelCase : List[Any] = min(end - start , __magic_name__ ) for i in range(__magic_name__ , 1 , -1 ): UpperCAmelCase : List[Any] = "".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): UpperCAmelCase : Optional[int] = "##" + bert_word[j] UpperCAmelCase : int = start + i UpperCAmelCase : List[str] = False break if single_word: start += 1 return bert_word def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Any = [] for i in range(0 , len(__magic_name__ ) , 100 ): UpperCAmelCase : str = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["cws"] ).cws UpperCAmelCase : int = [get_chinese_word(__magic_name__ ) for r in res] ltp_res.extend(__magic_name__ ) assert len(__magic_name__ ) == len(__magic_name__ ) UpperCAmelCase : int = [] for i in range(0 , len(__magic_name__ ) , 100 ): UpperCAmelCase : Dict = bert_tokenizer(lines[i : i + 100] , add_special_tokens=__magic_name__ , truncation=__magic_name__ , max_length=512 ) bert_res.extend(res["input_ids"] ) assert len(__magic_name__ ) == len(__magic_name__ ) UpperCAmelCase : Union[str, Any] = [] for input_ids, chinese_word in zip(__magic_name__ , __magic_name__ ): UpperCAmelCase : Tuple = [] for id in input_ids: UpperCAmelCase : Optional[Any] = bert_tokenizer._convert_id_to_token(__magic_name__ ) input_tokens.append(__magic_name__ ) UpperCAmelCase : List[Any] = add_sub_symbol(__magic_name__ , __magic_name__ ) UpperCAmelCase : str = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. for i, token in enumerate(__magic_name__ ): if token[:2] == "##": UpperCAmelCase : List[str] = token[2:] # save chinese tokens' pos if len(__magic_name__ ) == 1 and _is_chinese_char(ord(__magic_name__ ) ): ref_id.append(__magic_name__ ) ref_ids.append(__magic_name__ ) assert len(__magic_name__ ) == len(__magic_name__ ) return ref_ids def lowercase ( __magic_name__ ): '''simple docstring''' with open(args.file_name , "r" , encoding="utf-8" ) as f: UpperCAmelCase : Dict = f.readlines() UpperCAmelCase : List[str] = [line.strip() for line in data if len(__magic_name__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' UpperCAmelCase : Union[str, Any] = LTP(args.ltp ) # faster in GPU device UpperCAmelCase : Union[str, Any] = BertTokenizer.from_pretrained(args.bert ) UpperCAmelCase : Dict = prepare_ref(__magic_name__ , __magic_name__ , __magic_name__ ) with open(args.save_path , "w" , encoding="utf-8" ) as f: UpperCAmelCase : Tuple = [json.dumps(__magic_name__ ) + "\n" for ref in ref_ids] f.writelines(__magic_name__ ) if __name__ == "__main__": a : List[Any] = argparse.ArgumentParser(description="prepare_chinese_ref") parser.add_argument( "--file_name", required=False, type=str, default="./resources/chinese-demo.txt", help="file need process, same as training data in lm", ) parser.add_argument( "--ltp", required=False, type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path", ) parser.add_argument( "--bert", required=False, type=str, default="./resources/robert", help="resources for Bert tokenizer", ) parser.add_argument( "--save_path", required=False, type=str, default="./resources/ref.txt", help="path to save res", ) a : Tuple = parser.parse_args() main(args)
311
'''simple docstring''' def lowercase ( __magic_name__ ): '''simple docstring''' if number > 0: raise ValueError("input must be a negative integer" ) UpperCAmelCase : List[Any] = len(bin(__magic_name__ )[3:] ) UpperCAmelCase : Optional[Any] = bin(abs(__magic_name__ ) - (1 << binary_number_length) )[3:] UpperCAmelCase : Tuple = ( ( "1" + "0" * (binary_number_length - len(__magic_name__ )) + twos_complement_number ) if number < 0 else "0" ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
311
1
'''simple docstring''' # limitations under the License. from typing import Optional, Tuple, Union import torch from diffusers import DiffusionPipeline, ImagePipelineOutput class UpperCamelCase__ ( lowercase__ ): """simple docstring""" def __init__( self , snake_case , snake_case ): '''simple docstring''' super().__init__() self.register_modules(unet=snake_case , scheduler=snake_case ) @torch.no_grad() def __call__( self , snake_case = 1 , snake_case = None , snake_case = 5_0 , snake_case = "pil" , snake_case = True , **snake_case , ): '''simple docstring''' UpperCAmelCase : int = torch.randn( (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=snake_case , ) UpperCAmelCase : Dict = image.to(self.device ) # set step values self.scheduler.set_timesteps(snake_case ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output UpperCAmelCase : Optional[int] = self.unet(snake_case , snake_case ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 UpperCAmelCase : List[Any] = self.scheduler.step(snake_case , snake_case , snake_case ).prev_sample UpperCAmelCase : Optional[int] = (image / 2 + 0.5).clamp(0 , 1 ) UpperCAmelCase : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCAmelCase : List[str] = self.numpy_to_pil(snake_case ) if not return_dict: return (image,), "This is a local test" return ImagePipelineOutput(images=snake_case ), "This is a local test"
311
'''simple docstring''' from collections import Counter import numpy as np from sklearn import datasets from sklearn.model_selection import train_test_split a : int = datasets.load_iris() a : Union[str, Any] = np.array(data["data"]) a : Optional[Any] = np.array(data["target"]) a : List[Any] = data["target_names"] a , a , a , a : Dict = train_test_split(X, y) def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' return np.linalg.norm(np.array(__magic_name__ ) - np.array(__magic_name__ ) ) def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=5 ): '''simple docstring''' UpperCAmelCase : int = zip(__magic_name__ , __magic_name__ ) # List of distances of all points from the point to be classified UpperCAmelCase : List[Any] = [] for data_point in data: UpperCAmelCase : List[str] = euclidean_distance(data_point[0] , __magic_name__ ) distances.append((distance, data_point[1]) ) # Choosing 'k' points with the least distances. UpperCAmelCase : Union[str, Any] = [i[1] for i in sorted(__magic_name__ )[:k]] # Most commonly occurring class among them # is the class into which the point is classified UpperCAmelCase : List[str] = Counter(__magic_name__ ).most_common(1 )[0][0] return classes[result] if __name__ == "__main__": print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
311
1
'''simple docstring''' import argparse from collections import defaultdict import yaml a : str = "docs/source/en/_toctree.yml" def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = defaultdict(__magic_name__ ) for doc in model_doc: counts[doc["local"]] += 1 UpperCAmelCase : List[Any] = [key for key, value in counts.items() if value > 1] UpperCAmelCase : Dict = [] for duplicate_key in duplicates: UpperCAmelCase : Union[str, Any] = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key} ) if len(__magic_name__ ) > 1: raise ValueError( F"{duplicate_key} is present several times in the documentation table of content at " "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the " "others." ) # Only add this once new_doc.append({"local": duplicate_key, "title": titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1] ) # Sort return sorted(__magic_name__ , key=lambda __magic_name__ : s["title"].lower() ) def lowercase ( __magic_name__=False ): '''simple docstring''' with open(__magic_name__ , encoding="utf-8" ) as f: UpperCAmelCase : Any = yaml.safe_load(f.read() ) # Get to the API doc UpperCAmelCase : Optional[int] = 0 while content[api_idx]["title"] != "API": api_idx += 1 UpperCAmelCase : Union[str, Any] = content[api_idx]["sections"] # Then to the model doc UpperCAmelCase : Any = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 UpperCAmelCase : str = api_doc[model_idx]["sections"] UpperCAmelCase : Any = [(idx, section) for idx, section in enumerate(__magic_name__ ) if "sections" in section] UpperCAmelCase : Optional[int] = False for idx, modality_doc in modalities_docs: UpperCAmelCase : int = modality_doc["sections"] UpperCAmelCase : int = clean_model_doc_toc(__magic_name__ ) if old_modality_doc != new_modality_doc: UpperCAmelCase : int = True if overwrite: UpperCAmelCase : Dict = new_modality_doc if diff: if overwrite: UpperCAmelCase : Any = model_doc UpperCAmelCase : Any = api_doc with open(__magic_name__ , "w" , encoding="utf-8" ) as f: f.write(yaml.dump(__magic_name__ , allow_unicode=__magic_name__ ) ) else: raise ValueError( "The model doc part of the table of content is not properly sorted, run `make style` to fix this." ) if __name__ == "__main__": a : Optional[Any] = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") a : Optional[Any] = parser.parse_args() check_model_doc(args.fix_and_overwrite)
311
'''simple docstring''' def lowercase ( __magic_name__ ): '''simple docstring''' if number < 0: raise ValueError("number must not be negative" ) return number & (number - 1) == 0 if __name__ == "__main__": import doctest doctest.testmod()
311
1
'''simple docstring''' import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class UpperCamelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = IFInpaintingSuperResolutionPipeline SCREAMING_SNAKE_CASE__ : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"} SCREAMING_SNAKE_CASE__ : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} ) SCREAMING_SNAKE_CASE__ : Optional[int] = PipelineTesterMixin.required_optional_params - {"latents"} def A_ ( self ): '''simple docstring''' return self._get_superresolution_dummy_components() def A_ ( self , snake_case , snake_case=0 ): '''simple docstring''' if str(snake_case ).startswith("mps" ): UpperCAmelCase : List[str] = torch.manual_seed(snake_case ) else: UpperCAmelCase : int = torch.Generator(device=snake_case ).manual_seed(snake_case ) UpperCAmelCase : List[str] = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(snake_case ) ).to(snake_case ) UpperCAmelCase : List[Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(snake_case ) ).to(snake_case ) UpperCAmelCase : int = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(snake_case ) ).to(snake_case ) UpperCAmelCase : List[Any] = { "prompt": "A painting of a squirrel eating a burger", "image": image, "original_image": original_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def A_ ( self ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) def A_ ( self ): '''simple docstring''' self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" ) def A_ ( self ): '''simple docstring''' super().test_save_load_floataa(expected_max_diff=1e-1 ) def A_ ( self ): '''simple docstring''' self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def A_ ( self ): '''simple docstring''' self._test_save_load_local() def A_ ( self ): '''simple docstring''' self._test_inference_batch_single_identical( expected_max_diff=1e-2 , )
311
'''simple docstring''' import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def lowercase ( __magic_name__ , __magic_name__=10 ): '''simple docstring''' UpperCAmelCase : Tuple = [] for _ in range(__magic_name__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def lowercase ( __magic_name__ , __magic_name__=10 ): '''simple docstring''' UpperCAmelCase : List[str] = [] for step in range(__magic_name__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase : Any = os.path.join(__magic_name__ , "schedule.bin" ) torch.save(scheduler.state_dict() , __magic_name__ ) UpperCAmelCase : Any = torch.load(__magic_name__ ) scheduler.load_state_dict(__magic_name__ ) return lrs @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self , snake_case , snake_case , snake_case ): '''simple docstring''' self.assertEqual(len(snake_case ) , len(snake_case ) ) for a, b in zip(snake_case , snake_case ): self.assertAlmostEqual(snake_case , snake_case , delta=snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case ) UpperCAmelCase : Any = torch.tensor([0.4, 0.2, -0.5] ) UpperCAmelCase : Any = nn.MSELoss() # No warmup, constant schedule, no gradient clipping UpperCAmelCase : List[str] = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 ) for _ in range(1_0_0 ): UpperCAmelCase : List[Any] = criterion(snake_case , snake_case ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case ) UpperCAmelCase : int = torch.tensor([0.4, 0.2, -0.5] ) UpperCAmelCase : str = nn.MSELoss() # No warmup, constant schedule, no gradient clipping UpperCAmelCase : str = Adafactor( params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=snake_case , weight_decay=0.0 , relative_step=snake_case , scale_parameter=snake_case , warmup_init=snake_case , ) for _ in range(1_0_0_0 ): UpperCAmelCase : str = criterion(snake_case , snake_case ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = nn.Linear(50 , 50 ) if is_torch_available() else None SCREAMING_SNAKE_CASE__ : List[Any] = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None SCREAMING_SNAKE_CASE__ : Optional[int] = 10 def A_ ( self , snake_case , snake_case , snake_case , snake_case=None ): '''simple docstring''' self.assertEqual(len(snake_case ) , len(snake_case ) ) for a, b in zip(snake_case , snake_case ): self.assertAlmostEqual(snake_case , snake_case , delta=snake_case , msg=snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : int = {"num_warmup_steps": 2, "num_training_steps": 1_0} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) UpperCAmelCase : int = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {"num_warmup_steps": 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, "num_cycles": 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, "power": 2.0, "lr_end": 1e-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {"num_warmup_steps": 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): UpperCAmelCase , UpperCAmelCase : Any = data UpperCAmelCase : Tuple = scheduler_func(self.optimizer , **snake_case ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) UpperCAmelCase : List[str] = unwrap_schedule(snake_case , self.num_steps ) self.assertListAlmostEqual( snake_case , snake_case , tol=1e-2 , msg=f"failed for {scheduler_func} in normal scheduler" , ) UpperCAmelCase : Optional[Any] = scheduler_func(self.optimizer , **snake_case ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(snake_case ) # wrap to test picklability of the schedule UpperCAmelCase : Tuple = unwrap_and_save_reload_schedule(snake_case , self.num_steps ) self.assertListEqual(snake_case , snake_case , msg=f"failed for {scheduler_func} in save and reload" ) class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case ): '''simple docstring''' UpperCAmelCase : List[str] = fn def __call__( self , *snake_case , **snake_case ): '''simple docstring''' return self.fn(*snake_case , **snake_case ) @classmethod def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : Optional[int] = list(map(self , scheduler.lr_lambdas ) )
311
1
'''simple docstring''' a : str = tuple[float, float, float] a : int = tuple[float, float, float] def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[str] = end_pointa[0] - end_pointa[0] UpperCAmelCase : Optional[int] = end_pointa[1] - end_pointa[1] UpperCAmelCase : int = end_pointa[2] - end_pointa[2] return (x, y, z) def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = ab[1] * ac[2] - ab[2] * ac[1] # *i UpperCAmelCase : Dict = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j UpperCAmelCase : str = ab[0] * ac[1] - ab[1] * ac[0] # *k return (x, y, z) def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' return tuple(round(__magic_name__ , __magic_name__ ) for x in vector ) == (0, 0, 0) def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = 10 ): '''simple docstring''' UpperCAmelCase : Any = create_vector(__magic_name__ , __magic_name__ ) UpperCAmelCase : Any = create_vector(__magic_name__ , __magic_name__ ) return is_zero_vector(get_ad_vectors_cross(__magic_name__ , __magic_name__ ) , __magic_name__ )
311
'''simple docstring''' import jax.numpy as jnp from ...utils import logging from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel from .configuration_mta import MTaConfig a : Optional[Any] = logging.get_logger(__name__) a : Tuple = "T5Config" def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Any = jnp.zeros_like(__magic_name__ ) UpperCAmelCase : Optional[int] = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] ) UpperCAmelCase : str = shifted_input_ids.at[:, 0].set(__magic_name__ ) UpperCAmelCase : Any = jnp.where(shifted_input_ids == -100 , __magic_name__ , __magic_name__ ) return shifted_input_ids class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : int = "mt5" SCREAMING_SNAKE_CASE__ : Dict = MTaConfig class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : int = "mt5" SCREAMING_SNAKE_CASE__ : str = MTaConfig class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = "mt5" SCREAMING_SNAKE_CASE__ : str = MTaConfig
311
1
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = "facebook/bart-large-mnli" SCREAMING_SNAKE_CASE__ : List[str] = ( "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which " "should be the text to classify, and `labels`, which should be the list of labels to use for classification. " "It returns the most likely label in the list of provided `labels` for the input text." ) SCREAMING_SNAKE_CASE__ : List[str] = "text_classifier" SCREAMING_SNAKE_CASE__ : Tuple = AutoTokenizer SCREAMING_SNAKE_CASE__ : Tuple = AutoModelForSequenceClassification SCREAMING_SNAKE_CASE__ : str = ["text", ["text"]] SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["text"] def A_ ( self ): '''simple docstring''' super().setup() UpperCAmelCase : Optional[int] = self.model.config UpperCAmelCase : Dict = -1 for idx, label in config.idalabel.items(): if label.lower().startswith("entail" ): UpperCAmelCase : Optional[int] = int(snake_case ) if self.entailment_id == -1: raise ValueError("Could not determine the entailment ID from the model config, please pass it at init." ) def A_ ( self , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = labels return self.pre_processor( [text] * len(snake_case ) , [f"This example is {label}" for label in labels] , return_tensors="pt" , padding="max_length" , ) def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : int = outputs.logits UpperCAmelCase : Tuple = torch.argmax(logits[:, 2] ).item() return self._labels[label_id]
311
'''simple docstring''' from jiwer import compute_measures import datasets a : List[Any] = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n" a : str = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n" a : Union[str, Any] = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase__ ( datasets.Metric ): """simple docstring""" def A_ ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[ "https://en.wikipedia.org/wiki/Word_error_rate", ] , ) def A_ ( self , snake_case=None , snake_case=None , snake_case=False ): '''simple docstring''' if concatenate_texts: return compute_measures(snake_case , snake_case )["wer"] else: UpperCAmelCase : Dict = 0 UpperCAmelCase : Optional[Any] = 0 for prediction, reference in zip(snake_case , snake_case ): UpperCAmelCase : Tuple = compute_measures(snake_case , snake_case ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
311
1
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging a : str = logging.get_logger(__name__) a : Optional[int] = { "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json", # See all SEW models at https://huggingface.co/models?filter=sew } class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : str = "sew" def __init__( self , snake_case=3_2 , snake_case=7_6_8 , snake_case=1_2 , snake_case=1_2 , snake_case=3_0_7_2 , snake_case=2 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=0.1 , snake_case=0.0 , snake_case=0.1 , snake_case=0.1 , snake_case=0.02 , snake_case=1e-5 , snake_case="group" , snake_case="gelu" , snake_case=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , snake_case=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , snake_case=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , snake_case=False , snake_case=1_2_8 , snake_case=1_6 , snake_case=True , snake_case=0.05 , snake_case=1_0 , snake_case=2 , snake_case=0.0 , snake_case=1_0 , snake_case=0 , snake_case="mean" , snake_case=False , snake_case=False , snake_case=2_5_6 , snake_case=0 , snake_case=1 , snake_case=2 , **snake_case , ): '''simple docstring''' super().__init__(**snake_case , pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case ) UpperCAmelCase : List[Any] = hidden_size UpperCAmelCase : List[Any] = feat_extract_norm UpperCAmelCase : Tuple = feat_extract_activation UpperCAmelCase : str = list(snake_case ) UpperCAmelCase : Union[str, Any] = list(snake_case ) UpperCAmelCase : Optional[int] = list(snake_case ) UpperCAmelCase : Any = conv_bias UpperCAmelCase : Optional[int] = num_conv_pos_embeddings UpperCAmelCase : Optional[Any] = num_conv_pos_embedding_groups UpperCAmelCase : Optional[int] = len(self.conv_dim ) UpperCAmelCase : Tuple = num_hidden_layers UpperCAmelCase : int = intermediate_size UpperCAmelCase : str = squeeze_factor UpperCAmelCase : List[str] = hidden_act UpperCAmelCase : List[Any] = num_attention_heads UpperCAmelCase : int = hidden_dropout UpperCAmelCase : Optional[Any] = attention_dropout UpperCAmelCase : List[str] = activation_dropout UpperCAmelCase : Dict = feat_proj_dropout UpperCAmelCase : Optional[Any] = final_dropout UpperCAmelCase : str = layerdrop UpperCAmelCase : Dict = layer_norm_eps UpperCAmelCase : List[Any] = initializer_range UpperCAmelCase : List[Any] = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect." "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`," f"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)" f"= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 UpperCAmelCase : Union[str, Any] = apply_spec_augment UpperCAmelCase : int = mask_time_prob UpperCAmelCase : Tuple = mask_time_length UpperCAmelCase : Union[str, Any] = mask_time_min_masks UpperCAmelCase : List[Any] = mask_feature_prob UpperCAmelCase : int = mask_feature_length UpperCAmelCase : Dict = mask_feature_min_masks # ctc loss UpperCAmelCase : Dict = ctc_loss_reduction UpperCAmelCase : Tuple = ctc_zero_infinity # sequence classification UpperCAmelCase : Optional[int] = use_weighted_layer_sum UpperCAmelCase : List[Any] = classifier_proj_size @property def A_ ( self ): '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1 )
311
'''simple docstring''' from functools import lru_cache def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = 2 UpperCAmelCase : str = set() while i * i <= n: if n % i: i += 1 else: n //= i factors.add(__magic_name__ ) if n > 1: factors.add(__magic_name__ ) return factors @lru_cache def lowercase ( __magic_name__ ): '''simple docstring''' return len(unique_prime_factors(__magic_name__ ) ) def lowercase ( __magic_name__ ): '''simple docstring''' return len(set(__magic_name__ ) ) in (0, 1) def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = 2 while True: # Increment each value of a generated range UpperCAmelCase : Any = [base + i for i in range(__magic_name__ )] # Run elements through out unique_prime_factors function # Append our target number to the end. UpperCAmelCase : Dict = [upf_len(__magic_name__ ) for x in group] checker.append(__magic_name__ ) # If all numbers in the list are equal, return the group variable. if equality(__magic_name__ ): return group # Increment our base variable by 1 base += 1 def lowercase ( __magic_name__ = 4 ): '''simple docstring''' UpperCAmelCase : int = run(__magic_name__ ) return results[0] if len(__magic_name__ ) else None if __name__ == "__main__": print(solution())
311
1
'''simple docstring''' import argparse import copy def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[str] = {} with open(__magic_name__ ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: UpperCAmelCase : List[Any] = [] _list.append([line.split()[1], line.split()[2]] ) UpperCAmelCase : Tuple = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: UpperCAmelCase : Any = [] _list.append([line.split()[0], line.split()[2]] ) UpperCAmelCase : int = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' with open(__magic_name__ ) as f: UpperCAmelCase : List[str] = f.read(1 ) UpperCAmelCase : List[Any] = start_node UpperCAmelCase : Union[str, Any] = [] UpperCAmelCase : Any = start_node UpperCAmelCase : Optional[Any] = 0 while visiting not in first_solution: UpperCAmelCase : Optional[Any] = 1_0000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(__magic_name__ ) and k[0] not in first_solution: UpperCAmelCase : Tuple = k[1] UpperCAmelCase : Dict = k[0] first_solution.append(__magic_name__ ) UpperCAmelCase : int = distance_of_first_solution + int(__magic_name__ ) UpperCAmelCase : str = best_node first_solution.append(__magic_name__ ) UpperCAmelCase : int = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 UpperCAmelCase : str = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 1_0000 ) return first_solution, distance_of_first_solution def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Optional[Any] = [] for n in solution[1:-1]: UpperCAmelCase : Any = solution.index(__magic_name__ ) for kn in solution[1:-1]: UpperCAmelCase : Dict = solution.index(__magic_name__ ) if n == kn: continue UpperCAmelCase : Tuple = copy.deepcopy(__magic_name__ ) UpperCAmelCase : Optional[int] = kn UpperCAmelCase : List[str] = n UpperCAmelCase : str = 0 for k in _tmp[:-1]: UpperCAmelCase : List[Any] = _tmp[_tmp.index(__magic_name__ ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: UpperCAmelCase : List[Any] = distance + int(i[1] ) _tmp.append(__magic_name__ ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) UpperCAmelCase : List[str] = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda __magic_name__ : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[Any] = 1 UpperCAmelCase : List[str] = first_solution UpperCAmelCase : str = [] UpperCAmelCase : Union[str, Any] = distance_of_first_solution UpperCAmelCase : Union[str, Any] = solution while count <= iters: UpperCAmelCase : int = find_neighborhood(__magic_name__ , __magic_name__ ) UpperCAmelCase : Any = 0 UpperCAmelCase : List[str] = neighborhood[index_of_best_solution] UpperCAmelCase : Dict = len(__magic_name__ ) - 1 UpperCAmelCase : Dict = False while not found: UpperCAmelCase : List[Any] = 0 while i < len(__magic_name__ ): if best_solution[i] != solution[i]: UpperCAmelCase : int = best_solution[i] UpperCAmelCase : Optional[int] = solution[i] break UpperCAmelCase : List[str] = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) UpperCAmelCase : List[str] = True UpperCAmelCase : List[Any] = best_solution[:-1] UpperCAmelCase : str = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: UpperCAmelCase : Union[str, Any] = cost UpperCAmelCase : Tuple = solution else: UpperCAmelCase : Optional[Any] = index_of_best_solution + 1 UpperCAmelCase : str = neighborhood[index_of_best_solution] if len(__magic_name__ ) >= size: tabu_list.pop(0 ) UpperCAmelCase : int = count + 1 return best_solution_ever, best_cost def lowercase ( __magic_name__=None ): '''simple docstring''' UpperCAmelCase : Dict = generate_neighbours(args.File ) UpperCAmelCase , UpperCAmelCase : Any = generate_first_solution( args.File , __magic_name__ ) UpperCAmelCase , UpperCAmelCase : Any = tabu_search( __magic_name__ , __magic_name__ , __magic_name__ , args.Iterations , args.Size , ) print(F"Best solution: {best_sol}, with total distance: {best_cost}." ) if __name__ == "__main__": a : Union[str, Any] = argparse.ArgumentParser(description="Tabu Search") parser.add_argument( "-f", "--File", type=str, help="Path to the file containing the data", required=True, ) parser.add_argument( "-i", "--Iterations", type=int, help="How many iterations the algorithm should perform", required=True, ) parser.add_argument( "-s", "--Size", type=int, help="Size of the tabu list", required=True ) # Pass the arguments to main method main(parser.parse_args())
311
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) a : Union[str, Any] = { "configuration_encodec": [ "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP", "EncodecConfig", ], "feature_extraction_encodec": ["EncodecFeatureExtractor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Optional[int] = [ "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST", "EncodecModel", "EncodecPreTrainedModel", ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys a : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
311
1
'''simple docstring''' from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), F"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})" else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), F"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})" def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=True ): '''simple docstring''' model.train() UpperCAmelCase : List[str] = model(__magic_name__ ) UpperCAmelCase : Union[str, Any] = F.mse_loss(__magic_name__ , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(__magic_name__ ) def lowercase ( __magic_name__ , __magic_name__=False ): '''simple docstring''' set_seed(42 ) UpperCAmelCase : str = RegressionModel() UpperCAmelCase : Optional[Any] = deepcopy(__magic_name__ ) UpperCAmelCase : Any = RegressionDataset(length=80 ) UpperCAmelCase : Optional[Any] = DataLoader(__magic_name__ , batch_size=16 ) model.to(accelerator.device ) if sched: UpperCAmelCase : Optional[Any] = AdamW(params=model.parameters() , lr=1e-3 ) UpperCAmelCase : Union[str, Any] = AdamW(params=ddp_model.parameters() , lr=1e-3 ) UpperCAmelCase : int = LambdaLR(__magic_name__ , lr_lambda=lambda __magic_name__ : epoch**0.6_5 ) UpperCAmelCase : List[Any] = LambdaLR(__magic_name__ , lr_lambda=lambda __magic_name__ : epoch**0.6_5 ) # Make a copy of `model` if sched: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[int] = accelerator.prepare(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) else: UpperCAmelCase , UpperCAmelCase : Dict = accelerator.prepare(__magic_name__ , __magic_name__ ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = get_training_setup(__magic_name__ ) # Use a single batch UpperCAmelCase , UpperCAmelCase : Union[str, Any] = next(iter(__magic_name__ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model UpperCAmelCase , UpperCAmelCase : Optional[int] = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase , UpperCAmelCase : Any = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(__magic_name__ ): step_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) else: # Sync grads step_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), F"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})" # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) UpperCAmelCase : Any = ddp_input[torch.randperm(len(__magic_name__ ) )] def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = get_training_setup(__magic_name__ ) # Use a single batch UpperCAmelCase , UpperCAmelCase : List[str] = next(iter(__magic_name__ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model UpperCAmelCase , UpperCAmelCase : Union[str, Any] = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase , UpperCAmelCase : Union[str, Any] = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(__magic_name__ ): step_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) else: # Sync grads step_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})" else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})" # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) UpperCAmelCase : Tuple = ddp_input[torch.randperm(len(__magic_name__ ) )] def lowercase ( __magic_name__=False , __magic_name__=False ): '''simple docstring''' UpperCAmelCase : Tuple = Accelerator( split_batches=__magic_name__ , dispatch_batches=__magic_name__ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[int] = get_training_setup(__magic_name__ ) for iteration, batch in enumerate(__magic_name__ ): UpperCAmelCase , UpperCAmelCase : Any = batch.values() # Gather the distributed inputs and targs for the base model UpperCAmelCase , UpperCAmelCase : Any = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase , UpperCAmelCase : Any = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) # Do "gradient accumulation" (noop) with accelerator.accumulate(__magic_name__ ): step_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(__magic_name__ ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})" else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})" # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) UpperCAmelCase : Tuple = ddp_input[torch.randperm(len(__magic_name__ ) )] GradientState._reset_state() def lowercase ( __magic_name__=False , __magic_name__=False ): '''simple docstring''' UpperCAmelCase : str = Accelerator( split_batches=__magic_name__ , dispatch_batches=__magic_name__ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = get_training_setup(__magic_name__ , __magic_name__ ) for iteration, batch in enumerate(__magic_name__ ): UpperCAmelCase , UpperCAmelCase : List[str] = batch.values() # Gather the distributed inputs and targs for the base model UpperCAmelCase , UpperCAmelCase : Tuple = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase , UpperCAmelCase : int = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(__magic_name__ )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(__magic_name__ ): step_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), F"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n" UpperCAmelCase : Union[str, Any] = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(__magic_name__ )) if accelerator.num_processes > 1: check_model_parameters(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) GradientState._reset_state() def lowercase ( ): '''simple docstring''' UpperCAmelCase : str = Accelerator() UpperCAmelCase : Optional[int] = RegressionDataset(length=80 ) UpperCAmelCase : int = DataLoader(__magic_name__ , batch_size=16 ) UpperCAmelCase : Optional[int] = RegressionDataset(length=96 ) UpperCAmelCase : Optional[int] = DataLoader(__magic_name__ , batch_size=16 ) UpperCAmelCase , UpperCAmelCase : str = accelerator.prepare(__magic_name__ , __magic_name__ ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(__magic_name__ ): assert id(accelerator.gradient_state.active_dataloader ) == id(__magic_name__ ) if iteration < len(__magic_name__ ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(__magic_name__ ): assert id(accelerator.gradient_state.active_dataloader ) == id(__magic_name__ ) if batch_num < len(__magic_name__ ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def lowercase ( ): '''simple docstring''' UpperCAmelCase : Optional[int] = Accelerator() UpperCAmelCase : List[str] = accelerator.state if state.local_process_index == 0: print("**Test `accumulate` gradient accumulation with dataloader break**" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("**Test NOOP `no_sync` context manager**" ) test_noop_sync(__magic_name__ ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("**Test Distributed `no_sync` context manager**" ) test_distributed_sync(__magic_name__ ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation, " , F"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , ) test_gradient_accumulation(__magic_name__ , __magic_name__ ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , ) test_gradient_accumulation_with_opt_and_scheduler(__magic_name__ , __magic_name__ ) def lowercase ( __magic_name__ ): '''simple docstring''' main() if __name__ == "__main__": main()
311
'''simple docstring''' # Lint as: python3 import itertools import os import re a : Tuple = re.compile(R"([A-Z]+)([A-Z][a-z])") a : Union[str, Any] = re.compile(R"([a-z\d])([A-Z])") a : str = re.compile(R"(?<!_)_(?!_)") a : List[Any] = re.compile(R"(_{2,})") a : List[Any] = R"^\w+(\.\w+)*$" a : Dict = R"<>:/\|?*" def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = _uppercase_uppercase_re.sub(R"\1_\2" , __magic_name__ ) UpperCAmelCase : List[str] = _lowercase_uppercase_re.sub(R"\1_\2" , __magic_name__ ) return name.lower() def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Any = _single_underscore_re.split(__magic_name__ ) UpperCAmelCase : Union[str, Any] = [_multiple_underscores_re.split(__magic_name__ ) for n in name] return "".join(n.capitalize() for n in itertools.chain.from_iterable(__magic_name__ ) if n != "" ) def lowercase ( __magic_name__ ): '''simple docstring''' if os.path.basename(__magic_name__ ) != name: raise ValueError(F"Should be a dataset name, not a path: {name}" ) return camelcase_to_snakecase(__magic_name__ ) def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' if os.path.basename(__magic_name__ ) != name: raise ValueError(F"Should be a dataset name, not a path: {name}" ) if not re.match(_split_re , __magic_name__ ): raise ValueError(F"Split name should match '{_split_re}'' but got '{split}'." ) return F"{filename_prefix_for_name(__magic_name__ )}-{split}" def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None ): '''simple docstring''' UpperCAmelCase : List[str] = filename_prefix_for_split(__magic_name__ , __magic_name__ ) if filetype_suffix: prefix += F".{filetype_suffix}" UpperCAmelCase : int = os.path.join(__magic_name__ , __magic_name__ ) return F"{filepath}*" def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None ): '''simple docstring''' UpperCAmelCase : List[str] = filename_prefix_for_split(__magic_name__ , __magic_name__ ) UpperCAmelCase : int = os.path.join(__magic_name__ , __magic_name__ ) if shard_lengths: UpperCAmelCase : Tuple = len(__magic_name__ ) UpperCAmelCase : Optional[int] = [F"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(__magic_name__ )] if filetype_suffix: UpperCAmelCase : Optional[int] = [filename + F".{filetype_suffix}" for filename in filenames] return filenames else: UpperCAmelCase : int = prefix if filetype_suffix: filename += F".{filetype_suffix}" return [filename]
311
1
'''simple docstring''' import math from ...configuration_utils import PretrainedConfig from ...utils import logging a : Dict = logging.get_logger(__name__) a : Union[str, Any] = { "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json", # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio } class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = "data2vec-audio" def __init__( self , snake_case=3_2 , snake_case=7_6_8 , snake_case=1_2 , snake_case=1_2 , snake_case=3_0_7_2 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=0.1 , snake_case=0.0 , snake_case=0.1 , snake_case=0.1 , snake_case=0.02 , snake_case=1e-5 , snake_case="gelu" , snake_case=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , snake_case=(5, 2, 2, 2, 2, 2, 2) , snake_case=(1_0, 3, 3, 3, 3, 2, 2) , snake_case=False , snake_case=1_6 , snake_case=1_9 , snake_case=5 , snake_case=0.05 , snake_case=1_0 , snake_case=2 , snake_case=0.0 , snake_case=1_0 , snake_case=0 , snake_case="sum" , snake_case=False , snake_case=False , snake_case=2_5_6 , snake_case=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , snake_case=(5, 3, 3, 1, 1) , snake_case=(1, 2, 3, 1, 1) , snake_case=5_1_2 , snake_case=0 , snake_case=1 , snake_case=2 , snake_case=False , snake_case=3 , snake_case=2 , snake_case=3 , snake_case=None , **snake_case , ): '''simple docstring''' super().__init__(**snake_case , pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case ) UpperCAmelCase : List[str] = hidden_size UpperCAmelCase : List[Any] = feat_extract_activation UpperCAmelCase : List[str] = list(snake_case ) UpperCAmelCase : Optional[Any] = list(snake_case ) UpperCAmelCase : Optional[int] = list(snake_case ) UpperCAmelCase : List[str] = conv_bias UpperCAmelCase : Tuple = num_conv_pos_embeddings UpperCAmelCase : List[Any] = num_conv_pos_embedding_groups UpperCAmelCase : Tuple = conv_pos_kernel_size UpperCAmelCase : Optional[int] = len(self.conv_dim ) UpperCAmelCase : Tuple = num_hidden_layers UpperCAmelCase : Tuple = intermediate_size UpperCAmelCase : Tuple = hidden_act UpperCAmelCase : str = num_attention_heads UpperCAmelCase : int = hidden_dropout UpperCAmelCase : List[Any] = attention_dropout UpperCAmelCase : Optional[Any] = activation_dropout UpperCAmelCase : List[Any] = feat_proj_dropout UpperCAmelCase : int = final_dropout UpperCAmelCase : Any = layerdrop UpperCAmelCase : Dict = layer_norm_eps UpperCAmelCase : Any = initializer_range UpperCAmelCase : Union[str, Any] = vocab_size UpperCAmelCase : List[str] = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`," f" `len(config.conv_kernel) = {len(self.conv_kernel )}`." ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 UpperCAmelCase : List[str] = mask_time_prob UpperCAmelCase : str = mask_time_length UpperCAmelCase : Tuple = mask_time_min_masks UpperCAmelCase : str = mask_feature_prob UpperCAmelCase : List[str] = mask_feature_length UpperCAmelCase : Union[str, Any] = mask_feature_min_masks # ctc loss UpperCAmelCase : List[Any] = ctc_loss_reduction UpperCAmelCase : str = ctc_zero_infinity # adapter UpperCAmelCase : Optional[int] = add_adapter UpperCAmelCase : Union[str, Any] = adapter_kernel_size UpperCAmelCase : List[Any] = adapter_stride UpperCAmelCase : str = num_adapter_layers UpperCAmelCase : Tuple = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. UpperCAmelCase : Optional[int] = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. UpperCAmelCase : int = list(snake_case ) UpperCAmelCase : Optional[int] = list(snake_case ) UpperCAmelCase : Union[str, Any] = list(snake_case ) UpperCAmelCase : Tuple = xvector_output_dim @property def A_ ( self ): '''simple docstring''' return math.prod(self.conv_stride )
311
'''simple docstring''' from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) a : Optional[int] = _symbol_database.Default() a : Any = _descriptor_pool.Default().AddSerializedFile( B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03" ) a : Tuple = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals) if _descriptor._USE_C_DESCRIPTORS is False: a : str = None a : Optional[Any] = B"H\003" # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" a : str = 45 a : Any = 15_81 a : List[Any] = 15_17 a : Union[str, Any] = 15_70 a : Optional[Any] = 15_84 a : List[str] = 17_93 a : Optional[Any] = 17_95 a : Tuple = 19_16 a : Optional[Any] = 18_64 a : int = 19_05 a : Optional[Any] = 19_19 a : Union[str, Any] = 24_29 a : List[Any] = 22_08 a : Dict = 24_18 a : Optional[int] = 23_23 a : str = 24_07 # @@protoc_insertion_point(module_scope)
311
1
'''simple docstring''' import torch from diffusers import DDIMParallelScheduler from .test_schedulers import SchedulerCommonTest class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = (DDIMParallelScheduler,) SCREAMING_SNAKE_CASE__ : Optional[Any] = (("eta", 0.0), ("num_inference_steps", 50)) def A_ ( self , **snake_case ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = { "num_train_timesteps": 1_0_0_0, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "clip_sample": True, } config.update(**snake_case ) return config def A_ ( self , **snake_case ): '''simple docstring''' UpperCAmelCase : int = self.scheduler_classes[0] UpperCAmelCase : Tuple = self.get_scheduler_config(**snake_case ) UpperCAmelCase : List[str] = scheduler_class(**snake_case ) UpperCAmelCase , UpperCAmelCase : Any = 1_0, 0.0 UpperCAmelCase : Any = self.dummy_model() UpperCAmelCase : str = self.dummy_sample_deter scheduler.set_timesteps(snake_case ) for t in scheduler.timesteps: UpperCAmelCase : Optional[int] = model(snake_case , snake_case ) UpperCAmelCase : List[Any] = scheduler.step(snake_case , snake_case , snake_case , snake_case ).prev_sample return sample def A_ ( self ): '''simple docstring''' for timesteps in [1_0_0, 5_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=snake_case ) def A_ ( self ): '''simple docstring''' for steps_offset in [0, 1]: self.check_over_configs(steps_offset=snake_case ) UpperCAmelCase : Optional[Any] = self.scheduler_classes[0] UpperCAmelCase : Any = self.get_scheduler_config(steps_offset=1 ) UpperCAmelCase : Dict = scheduler_class(**snake_case ) scheduler.set_timesteps(5 ) assert torch.equal(scheduler.timesteps , torch.LongTensor([8_0_1, 6_0_1, 4_0_1, 2_0_1, 1] ) ) def A_ ( self ): '''simple docstring''' for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=snake_case , beta_end=snake_case ) def A_ ( self ): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=snake_case ) def A_ ( self ): '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=snake_case ) def A_ ( self ): '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=snake_case ) def A_ ( self ): '''simple docstring''' for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=snake_case ) def A_ ( self ): '''simple docstring''' for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=snake_case ) def A_ ( self ): '''simple docstring''' self.check_over_configs(thresholding=snake_case ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=snake_case , prediction_type=snake_case , sample_max_value=snake_case , ) def A_ ( self ): '''simple docstring''' for t in [1, 1_0, 4_9]: self.check_over_forward(time_step=snake_case ) def A_ ( self ): '''simple docstring''' for t, num_inference_steps in zip([1, 1_0, 5_0] , [1_0, 5_0, 5_0_0] ): self.check_over_forward(time_step=snake_case , num_inference_steps=snake_case ) def A_ ( self ): '''simple docstring''' for t, eta in zip([1, 1_0, 4_9] , [0.0, 0.5, 1.0] ): self.check_over_forward(time_step=snake_case , eta=snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[Any] = self.scheduler_classes[0] UpperCAmelCase : Dict = self.get_scheduler_config() UpperCAmelCase : str = scheduler_class(**snake_case ) assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(4_2_0 , 4_0_0 ) - 0.1_4771 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(9_8_0 , 9_6_0 ) - 0.3_2460 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 , 4_8_6 ) - 0.0_0979 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 , 9_9_8 ) - 0.02 ) ) < 1e-5 def A_ ( self ): '''simple docstring''' UpperCAmelCase : int = self.scheduler_classes[0] UpperCAmelCase : Union[str, Any] = self.get_scheduler_config() UpperCAmelCase : Dict = scheduler_class(**snake_case ) UpperCAmelCase , UpperCAmelCase : Dict = 1_0, 0.0 scheduler.set_timesteps(snake_case ) UpperCAmelCase : Optional[Any] = self.dummy_model() UpperCAmelCase : List[Any] = self.dummy_sample_deter UpperCAmelCase : Tuple = self.dummy_sample_deter + 0.1 UpperCAmelCase : List[Any] = self.dummy_sample_deter - 0.1 UpperCAmelCase : Any = samplea.shape[0] UpperCAmelCase : Any = torch.stack([samplea, samplea, samplea] , dim=0 ) UpperCAmelCase : str = torch.arange(snake_case )[0:3, None].repeat(1 , snake_case ) UpperCAmelCase : Optional[int] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) UpperCAmelCase : str = scheduler.batch_step_no_noise(snake_case , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , snake_case ) UpperCAmelCase : Optional[int] = torch.sum(torch.abs(snake_case ) ) UpperCAmelCase : Tuple = torch.mean(torch.abs(snake_case ) ) assert abs(result_sum.item() - 1147.7904 ) < 1e-2 assert abs(result_mean.item() - 0.4982 ) < 1e-3 def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[str] = self.full_loop() UpperCAmelCase : Any = torch.sum(torch.abs(snake_case ) ) UpperCAmelCase : int = torch.mean(torch.abs(snake_case ) ) assert abs(result_sum.item() - 172.0067 ) < 1e-2 assert abs(result_mean.item() - 0.22_3967 ) < 1e-3 def A_ ( self ): '''simple docstring''' UpperCAmelCase : int = self.full_loop(prediction_type="v_prediction" ) UpperCAmelCase : Tuple = torch.sum(torch.abs(snake_case ) ) UpperCAmelCase : Union[str, Any] = torch.mean(torch.abs(snake_case ) ) assert abs(result_sum.item() - 52.5302 ) < 1e-2 assert abs(result_mean.item() - 0.0684 ) < 1e-3 def A_ ( self ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = self.full_loop(set_alpha_to_one=snake_case , beta_start=0.01 ) UpperCAmelCase : Any = torch.sum(torch.abs(snake_case ) ) UpperCAmelCase : List[Any] = torch.mean(torch.abs(snake_case ) ) assert abs(result_sum.item() - 149.8295 ) < 1e-2 assert abs(result_mean.item() - 0.1951 ) < 1e-3 def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = self.full_loop(set_alpha_to_one=snake_case , beta_start=0.01 ) UpperCAmelCase : Dict = torch.sum(torch.abs(snake_case ) ) UpperCAmelCase : Union[str, Any] = torch.mean(torch.abs(snake_case ) ) assert abs(result_sum.item() - 149.0784 ) < 1e-2 assert abs(result_mean.item() - 0.1941 ) < 1e-3
311
'''simple docstring''' import argparse import copy def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[str] = {} with open(__magic_name__ ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: UpperCAmelCase : List[Any] = [] _list.append([line.split()[1], line.split()[2]] ) UpperCAmelCase : Tuple = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: UpperCAmelCase : Any = [] _list.append([line.split()[0], line.split()[2]] ) UpperCAmelCase : int = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' with open(__magic_name__ ) as f: UpperCAmelCase : List[str] = f.read(1 ) UpperCAmelCase : List[Any] = start_node UpperCAmelCase : Union[str, Any] = [] UpperCAmelCase : Any = start_node UpperCAmelCase : Optional[Any] = 0 while visiting not in first_solution: UpperCAmelCase : Optional[Any] = 1_0000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(__magic_name__ ) and k[0] not in first_solution: UpperCAmelCase : Tuple = k[1] UpperCAmelCase : Dict = k[0] first_solution.append(__magic_name__ ) UpperCAmelCase : int = distance_of_first_solution + int(__magic_name__ ) UpperCAmelCase : str = best_node first_solution.append(__magic_name__ ) UpperCAmelCase : int = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 UpperCAmelCase : str = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 1_0000 ) return first_solution, distance_of_first_solution def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Optional[Any] = [] for n in solution[1:-1]: UpperCAmelCase : Any = solution.index(__magic_name__ ) for kn in solution[1:-1]: UpperCAmelCase : Dict = solution.index(__magic_name__ ) if n == kn: continue UpperCAmelCase : Tuple = copy.deepcopy(__magic_name__ ) UpperCAmelCase : Optional[int] = kn UpperCAmelCase : List[str] = n UpperCAmelCase : str = 0 for k in _tmp[:-1]: UpperCAmelCase : List[Any] = _tmp[_tmp.index(__magic_name__ ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: UpperCAmelCase : List[Any] = distance + int(i[1] ) _tmp.append(__magic_name__ ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) UpperCAmelCase : List[str] = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda __magic_name__ : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[Any] = 1 UpperCAmelCase : List[str] = first_solution UpperCAmelCase : str = [] UpperCAmelCase : Union[str, Any] = distance_of_first_solution UpperCAmelCase : Union[str, Any] = solution while count <= iters: UpperCAmelCase : int = find_neighborhood(__magic_name__ , __magic_name__ ) UpperCAmelCase : Any = 0 UpperCAmelCase : List[str] = neighborhood[index_of_best_solution] UpperCAmelCase : Dict = len(__magic_name__ ) - 1 UpperCAmelCase : Dict = False while not found: UpperCAmelCase : List[Any] = 0 while i < len(__magic_name__ ): if best_solution[i] != solution[i]: UpperCAmelCase : int = best_solution[i] UpperCAmelCase : Optional[int] = solution[i] break UpperCAmelCase : List[str] = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) UpperCAmelCase : List[str] = True UpperCAmelCase : List[Any] = best_solution[:-1] UpperCAmelCase : str = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: UpperCAmelCase : Union[str, Any] = cost UpperCAmelCase : Tuple = solution else: UpperCAmelCase : Optional[Any] = index_of_best_solution + 1 UpperCAmelCase : str = neighborhood[index_of_best_solution] if len(__magic_name__ ) >= size: tabu_list.pop(0 ) UpperCAmelCase : int = count + 1 return best_solution_ever, best_cost def lowercase ( __magic_name__=None ): '''simple docstring''' UpperCAmelCase : Dict = generate_neighbours(args.File ) UpperCAmelCase , UpperCAmelCase : Any = generate_first_solution( args.File , __magic_name__ ) UpperCAmelCase , UpperCAmelCase : Any = tabu_search( __magic_name__ , __magic_name__ , __magic_name__ , args.Iterations , args.Size , ) print(F"Best solution: {best_sol}, with total distance: {best_cost}." ) if __name__ == "__main__": a : Union[str, Any] = argparse.ArgumentParser(description="Tabu Search") parser.add_argument( "-f", "--File", type=str, help="Path to the file containing the data", required=True, ) parser.add_argument( "-i", "--Iterations", type=int, help="How many iterations the algorithm should perform", required=True, ) parser.add_argument( "-s", "--Size", type=int, help="Size of the tabu list", required=True ) # Pass the arguments to main method main(parser.parse_args())
311
1
'''simple docstring''' import math def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' if initial_intensity < 0: raise ValueError("The value of intensity cannot be negative" ) # handling of negative values of initial intensity if angle < 0 or angle > 360: raise ValueError("In Malus Law, the angle is in the range 0-360 degrees" ) # handling of values out of allowed range return initial_intensity * (math.cos(math.radians(__magic_name__ ) ) ** 2) if __name__ == "__main__": import doctest doctest.testmod(name="malus_law")
311
'''simple docstring''' from collections.abc import Generator from math import sin def lowercase ( __magic_name__ ): '''simple docstring''' if len(__magic_name__ ) != 32: raise ValueError("Input must be of length 32" ) UpperCAmelCase : Union[str, Any] = b"" for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def lowercase ( __magic_name__ ): '''simple docstring''' if i < 0: raise ValueError("Input must be non-negative" ) UpperCAmelCase : Dict = format(__magic_name__ , "08x" )[-8:] UpperCAmelCase : List[str] = b"" for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" ) return little_endian_hex def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : int = b"" for char in message: bit_string += format(__magic_name__ , "08b" ).encode("utf-8" ) UpperCAmelCase : List[Any] = format(len(__magic_name__ ) , "064b" ).encode("utf-8" ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(__magic_name__ ) % 512 != 448: bit_string += b"0" bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] ) return bit_string def lowercase ( __magic_name__ ): '''simple docstring''' if len(__magic_name__ ) % 512 != 0: raise ValueError("Input must have length that's a multiple of 512" ) for pos in range(0 , len(__magic_name__ ) , 512 ): UpperCAmelCase : Union[str, Any] = bit_string[pos : pos + 512] UpperCAmelCase : Tuple = [] for i in range(0 , 512 , 32 ): block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) ) yield block_words def lowercase ( __magic_name__ ): '''simple docstring''' if i < 0: raise ValueError("Input must be non-negative" ) UpperCAmelCase : Any = format(__magic_name__ , "032b" ) UpperCAmelCase : int = "" for c in i_str: new_str += "1" if c == "0" else "0" return int(__magic_name__ , 2 ) def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' return (a + b) % 2**32 def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' if i < 0: raise ValueError("Input must be non-negative" ) if shift < 0: raise ValueError("Shift must be non-negative" ) return ((i << shift) ^ (i >> (32 - shift))) % 2**32 def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = preprocess(__magic_name__ ) UpperCAmelCase : List[Any] = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )] # Starting states UpperCAmelCase : List[str] = 0X67452301 UpperCAmelCase : Tuple = 0XEFCDAB89 UpperCAmelCase : List[Any] = 0X98BADCFE UpperCAmelCase : List[str] = 0X10325476 UpperCAmelCase : Dict = [ 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(__magic_name__ ): UpperCAmelCase : Optional[Any] = aa UpperCAmelCase : List[Any] = ba UpperCAmelCase : Optional[Any] = ca UpperCAmelCase : Any = da # Hash current chunk for i in range(64 ): if i <= 15: # f = (b & c) | (not_32(b) & d) # Alternate definition for f UpperCAmelCase : Tuple = d ^ (b & (c ^ d)) UpperCAmelCase : List[str] = i elif i <= 31: # f = (d & b) | (not_32(d) & c) # Alternate definition for f UpperCAmelCase : int = c ^ (d & (b ^ c)) UpperCAmelCase : Tuple = (5 * i + 1) % 16 elif i <= 47: UpperCAmelCase : Any = b ^ c ^ d UpperCAmelCase : Union[str, Any] = (3 * i + 5) % 16 else: UpperCAmelCase : Dict = c ^ (b | not_aa(__magic_name__ )) UpperCAmelCase : Dict = (7 * i) % 16 UpperCAmelCase : List[str] = (f + a + added_consts[i] + block_words[g]) % 2**32 UpperCAmelCase : List[Any] = d UpperCAmelCase : Any = c UpperCAmelCase : Dict = b UpperCAmelCase : Union[str, Any] = sum_aa(__magic_name__ , left_rotate_aa(__magic_name__ , shift_amounts[i] ) ) # Add hashed chunk to running total UpperCAmelCase : List[str] = sum_aa(__magic_name__ , __magic_name__ ) UpperCAmelCase : Any = sum_aa(__magic_name__ , __magic_name__ ) UpperCAmelCase : List[Any] = sum_aa(__magic_name__ , __magic_name__ ) UpperCAmelCase : Optional[int] = sum_aa(__magic_name__ , __magic_name__ ) UpperCAmelCase : List[str] = reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ ) return digest if __name__ == "__main__": import doctest doctest.testmod()
311
1
'''simple docstring''' import warnings from ...utils import logging from .image_processing_donut import DonutImageProcessor a : Tuple = logging.get_logger(__name__) class UpperCamelCase__ ( lowercase__ ): """simple docstring""" def __init__( self , *snake_case , **snake_case ): '''simple docstring''' warnings.warn( "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use DonutImageProcessor instead." , snake_case , ) super().__init__(*snake_case , **snake_case )
311
'''simple docstring''' a : List[str] = "0.21.0" from .accelerator import Accelerator from .big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from .data_loader import skip_first_batches from .launchers import debug_launcher, notebook_launcher from .state import PartialState from .utils import ( DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states, ) if is_rich_available(): from .utils import rich
311
1
'''simple docstring''' import os from pathlib import Path def lowercase ( ): '''simple docstring''' from torch.utils.cpp_extension import load UpperCAmelCase : Optional[Any] = Path(__magic_name__ ).resolve().parent.parent.parent / "kernels" / "deformable_detr" UpperCAmelCase : List[str] = [ root / filename for filename in [ "vision.cpp", os.path.join("cpu" , "ms_deform_attn_cpu.cpp" ), os.path.join("cuda" , "ms_deform_attn_cuda.cu" ), ] ] load( "MultiScaleDeformableAttention" , __magic_name__ , with_cuda=__magic_name__ , extra_include_paths=[str(__magic_name__ )] , extra_cflags=["-DWITH_CUDA=1"] , extra_cuda_cflags=[ "-DCUDA_HAS_FP16=1", "-D__CUDA_NO_HALF_OPERATORS__", "-D__CUDA_NO_HALF_CONVERSIONS__", "-D__CUDA_NO_HALF2_OPERATORS__", ] , ) import MultiScaleDeformableAttention as MSDA return MSDA
311
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() a : Dict = logging.get_logger(__name__) def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[str] = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: UpperCAmelCase : Tuple = 192 UpperCAmelCase : str = 768 UpperCAmelCase : List[Any] = 12 UpperCAmelCase : List[Any] = 3 UpperCAmelCase : List[Any] = [800, 1333] UpperCAmelCase : List[str] = False elif yolos_name == "yolos_s_dWr": UpperCAmelCase : Union[str, Any] = 330 UpperCAmelCase : Union[str, Any] = 14 UpperCAmelCase : Any = 6 UpperCAmelCase : int = 1320 elif "yolos_s" in yolos_name: UpperCAmelCase : Union[str, Any] = 384 UpperCAmelCase : Dict = 1536 UpperCAmelCase : str = 12 UpperCAmelCase : List[str] = 6 elif "yolos_b" in yolos_name: UpperCAmelCase : int = [800, 1344] UpperCAmelCase : Optional[int] = 91 UpperCAmelCase : int = "huggingface/label-files" UpperCAmelCase : Union[str, Any] = "coco-detection-id2label.json" UpperCAmelCase : Optional[Any] = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="dataset" ) , "r" ) ) UpperCAmelCase : str = {int(__magic_name__ ): v for k, v in idalabel.items()} UpperCAmelCase : str = idalabel UpperCAmelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} return config def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ = False ): '''simple docstring''' for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCAmelCase : Tuple = state_dict.pop(F"blocks.{i}.attn.qkv.weight" ) UpperCAmelCase : List[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase : str = in_proj_weight[: config.hidden_size, :] UpperCAmelCase : Optional[int] = in_proj_bias[: config.hidden_size] UpperCAmelCase : Optional[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCAmelCase : int = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCAmelCase : str = in_proj_weight[-config.hidden_size :, :] UpperCAmelCase : Tuple = in_proj_bias[-config.hidden_size :] def lowercase ( __magic_name__ ): '''simple docstring''' if "backbone" in name: UpperCAmelCase : int = name.replace("backbone" , "vit" ) if "cls_token" in name: UpperCAmelCase : Dict = name.replace("cls_token" , "embeddings.cls_token" ) if "det_token" in name: UpperCAmelCase : int = name.replace("det_token" , "embeddings.detection_tokens" ) if "mid_pos_embed" in name: UpperCAmelCase : Tuple = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" ) if "pos_embed" in name: UpperCAmelCase : int = name.replace("pos_embed" , "embeddings.position_embeddings" ) if "patch_embed.proj" in name: UpperCAmelCase : str = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "blocks" in name: UpperCAmelCase : Tuple = name.replace("blocks" , "encoder.layer" ) if "attn.proj" in name: UpperCAmelCase : Tuple = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: UpperCAmelCase : Any = name.replace("attn" , "attention.self" ) if "norm1" in name: UpperCAmelCase : int = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: UpperCAmelCase : List[str] = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: UpperCAmelCase : List[str] = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: UpperCAmelCase : Dict = name.replace("mlp.fc2" , "output.dense" ) if "class_embed" in name: UpperCAmelCase : Any = name.replace("class_embed" , "class_labels_classifier" ) if "bbox_embed" in name: UpperCAmelCase : Optional[int] = name.replace("bbox_embed" , "bbox_predictor" ) if "vit.norm" in name: UpperCAmelCase : Tuple = name.replace("vit.norm" , "vit.layernorm" ) return name def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' for key in orig_state_dict.copy().keys(): UpperCAmelCase : Optional[int] = orig_state_dict.pop(__magic_name__ ) if "qkv" in key: UpperCAmelCase : str = key.split("." ) UpperCAmelCase : List[Any] = int(key_split[2] ) UpperCAmelCase : int = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: UpperCAmelCase : Optional[int] = val[:dim, :] UpperCAmelCase : Union[str, Any] = val[ dim : dim * 2, : ] UpperCAmelCase : Any = val[-dim:, :] else: UpperCAmelCase : Tuple = val[:dim] UpperCAmelCase : List[str] = val[dim : dim * 2] UpperCAmelCase : Any = val[-dim:] else: UpperCAmelCase : Union[str, Any] = val return orig_state_dict def lowercase ( ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase : Tuple = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw ) return im @torch.no_grad() def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = False ): '''simple docstring''' UpperCAmelCase : Tuple = get_yolos_config(__magic_name__ ) # load original state_dict UpperCAmelCase : int = torch.load(__magic_name__ , map_location="cpu" )["model"] # load 🤗 model UpperCAmelCase : int = YolosForObjectDetection(__magic_name__ ) model.eval() UpperCAmelCase : Dict = convert_state_dict(__magic_name__ , __magic_name__ ) model.load_state_dict(__magic_name__ ) # Check outputs on an image, prepared by YolosImageProcessor UpperCAmelCase : Dict = 800 if yolos_name != "yolos_ti" else 512 UpperCAmelCase : int = YolosImageProcessor(format="coco_detection" , size=__magic_name__ ) UpperCAmelCase : List[Any] = image_processor(images=prepare_img() , return_tensors="pt" ) UpperCAmelCase : List[str] = model(**__magic_name__ ) UpperCAmelCase , UpperCAmelCase : Optional[int] = outputs.logits, outputs.pred_boxes UpperCAmelCase , UpperCAmelCase : Optional[Any] = None, None if yolos_name == "yolos_ti": UpperCAmelCase : str = torch.tensor( [[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] ) UpperCAmelCase : Tuple = torch.tensor( [[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] ) elif yolos_name == "yolos_s_200_pre": UpperCAmelCase : Union[str, Any] = torch.tensor( [[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] ) UpperCAmelCase : List[str] = torch.tensor( [[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] ) elif yolos_name == "yolos_s_300_pre": UpperCAmelCase : List[str] = torch.tensor( [[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] ) UpperCAmelCase : Dict = torch.tensor( [[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] ) elif yolos_name == "yolos_s_dWr": UpperCAmelCase : Dict = torch.tensor( [[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] ) UpperCAmelCase : List[Any] = torch.tensor( [[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] ) elif yolos_name == "yolos_base": UpperCAmelCase : str = torch.tensor( [[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] ) UpperCAmelCase : Union[str, Any] = torch.tensor( [[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] ) else: raise ValueError(F"Unknown yolos_name: {yolos_name}" ) assert torch.allclose(logits[0, :3, :3] , __magic_name__ , atol=1e-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , __magic_name__ , atol=1e-4 ) Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) print(F"Saving model {yolos_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(__magic_name__ ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(__magic_name__ ) if push_to_hub: UpperCAmelCase : int = { "yolos_ti": "yolos-tiny", "yolos_s_200_pre": "yolos-small", "yolos_s_300_pre": "yolos-small-300", "yolos_s_dWr": "yolos-small-dwr", "yolos_base": "yolos-base", } print("Pushing to the hub..." ) UpperCAmelCase : Tuple = model_mapping[yolos_name] image_processor.push_to_hub(__magic_name__ , organization="hustvl" ) model.push_to_hub(__magic_name__ , organization="hustvl" ) if __name__ == "__main__": a : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--yolos_name", default="yolos_s_200_pre", type=str, help=( "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre'," " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'." ), ) parser.add_argument( "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) a : str = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
311
1
'''simple docstring''' a : Optional[Any] = 2_56 # Modulus to hash a string a : str = 1_00_00_03 def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[Any] = len(__magic_name__ ) UpperCAmelCase : Tuple = len(__magic_name__ ) if p_len > t_len: return False UpperCAmelCase : Union[str, Any] = 0 UpperCAmelCase : str = 0 UpperCAmelCase : Union[str, Any] = 1 # Calculating the hash of pattern and substring of text for i in range(__magic_name__ ): UpperCAmelCase : Any = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus UpperCAmelCase : Optional[Any] = (ord(text[i] ) + text_hash * alphabet_size) % modulus if i == p_len - 1: continue UpperCAmelCase : List[str] = (modulus_power * alphabet_size) % modulus for i in range(0 , t_len - p_len + 1 ): if text_hash == p_hash and text[i : i + p_len] == pattern: return True if i == t_len - p_len: continue # Calculate the https://en.wikipedia.org/wiki/Rolling_hash UpperCAmelCase : Optional[int] = ( (text_hash - ord(text[i] ) * modulus_power) * alphabet_size + ord(text[i + p_len] ) ) % modulus return False def lowercase ( ): '''simple docstring''' UpperCAmelCase : List[str] = "abc1abc12" UpperCAmelCase : Any = "alskfjaldsabc1abc1abc12k23adsfabcabc" UpperCAmelCase : int = "alskfjaldsk23adsfabcabc" assert rabin_karp(__magic_name__ , __magic_name__ ) and not rabin_karp(__magic_name__ , __magic_name__ ) # Test 2) UpperCAmelCase : str = "ABABX" UpperCAmelCase : Optional[int] = "ABABZABABYABABX" assert rabin_karp(__magic_name__ , __magic_name__ ) # Test 3) UpperCAmelCase : int = "AAAB" UpperCAmelCase : Tuple = "ABAAAAAB" assert rabin_karp(__magic_name__ , __magic_name__ ) # Test 4) UpperCAmelCase : str = "abcdabcy" UpperCAmelCase : Optional[Any] = "abcxabcdabxabcdabcdabcy" assert rabin_karp(__magic_name__ , __magic_name__ ) # Test 5) UpperCAmelCase : int = "Lü" UpperCAmelCase : List[Any] = "Lüsai" assert rabin_karp(__magic_name__ , __magic_name__ ) UpperCAmelCase : Optional[Any] = "Lue" assert not rabin_karp(__magic_name__ , __magic_name__ ) print("Success." ) if __name__ == "__main__": test_rabin_karp()
311
'''simple docstring''' import argparse import logging import pickle import random import time import numpy as np from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO ) a : Tuple = logging.getLogger(__name__) def lowercase ( ): '''simple docstring''' UpperCAmelCase : Any = argparse.ArgumentParser( description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." ) parser.add_argument("--file_path" , type=__magic_name__ , default="data/dump.txt" , help="The path to the data." ) parser.add_argument("--tokenizer_type" , type=__magic_name__ , default="bert" , choices=["bert", "roberta", "gpt2"] ) parser.add_argument("--tokenizer_name" , type=__magic_name__ , default="bert-base-uncased" , help="The tokenizer to use." ) parser.add_argument("--dump_file" , type=__magic_name__ , default="data/dump" , help="The dump file prefix." ) UpperCAmelCase : List[Any] = parser.parse_args() logger.info(F"Loading Tokenizer ({args.tokenizer_name})" ) if args.tokenizer_type == "bert": UpperCAmelCase : Any = BertTokenizer.from_pretrained(args.tokenizer_name ) UpperCAmelCase : Optional[int] = tokenizer.special_tokens_map["cls_token"] # `[CLS]` UpperCAmelCase : Any = tokenizer.special_tokens_map["sep_token"] # `[SEP]` elif args.tokenizer_type == "roberta": UpperCAmelCase : List[Any] = RobertaTokenizer.from_pretrained(args.tokenizer_name ) UpperCAmelCase : Tuple = tokenizer.special_tokens_map["cls_token"] # `<s>` UpperCAmelCase : Optional[int] = tokenizer.special_tokens_map["sep_token"] # `</s>` elif args.tokenizer_type == "gpt2": UpperCAmelCase : List[str] = GPTaTokenizer.from_pretrained(args.tokenizer_name ) UpperCAmelCase : Optional[Any] = tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>` UpperCAmelCase : List[Any] = tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>` logger.info(F"Loading text from {args.file_path}" ) with open(args.file_path , "r" , encoding="utf8" ) as fp: UpperCAmelCase : str = fp.readlines() logger.info("Start encoding" ) logger.info(F"{len(__magic_name__ )} examples to process." ) UpperCAmelCase : int = [] UpperCAmelCase : int = 0 UpperCAmelCase : Union[str, Any] = 1_0000 UpperCAmelCase : Union[str, Any] = time.time() for text in data: UpperCAmelCase : Dict = F"{bos} {text.strip()} {sep}" UpperCAmelCase : Tuple = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) rslt.append(__magic_name__ ) iter += 1 if iter % interval == 0: UpperCAmelCase : Dict = time.time() logger.info(F"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" ) UpperCAmelCase : Any = time.time() logger.info("Finished binarization" ) logger.info(F"{len(__magic_name__ )} examples processed." ) UpperCAmelCase : str = F"{args.dump_file}.{args.tokenizer_name}.pickle" UpperCAmelCase : List[str] = tokenizer.vocab_size if vocab_size < (1 << 16): UpperCAmelCase : int = [np.uintaa(__magic_name__ ) for d in rslt] else: UpperCAmelCase : int = [np.intaa(__magic_name__ ) for d in rslt] random.shuffle(rslt_ ) logger.info(F"Dump to {dp_file}" ) with open(__magic_name__ , "wb" ) as handle: pickle.dump(rslt_ , __magic_name__ , protocol=pickle.HIGHEST_PROTOCOL ) if __name__ == "__main__": main()
311
1
'''simple docstring''' import numpy as np from transformers import BatchFeature from transformers.testing_utils import require_tf, require_torch from .test_feature_extraction_common import FeatureExtractionSavingTestMixin class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = None SCREAMING_SNAKE_CASE__ : Tuple = None @property def A_ ( self ): '''simple docstring''' return self.feat_extract_tester.prepare_feat_extract_dict() def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(snake_case , "feature_size" ) ) self.assertTrue(hasattr(snake_case , "sampling_rate" ) ) self.assertTrue(hasattr(snake_case , "padding_value" ) ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = self.feat_extract_tester.prepare_inputs_for_common() UpperCAmelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict ) UpperCAmelCase : Optional[Any] = feat_extract.model_input_names[0] UpperCAmelCase : List[Any] = BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(snake_case ) == len(snake_case ) for x, y in zip(snake_case , processed_features[input_name] ) ) ) UpperCAmelCase : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=snake_case ) UpperCAmelCase : Union[str, Any] = BatchFeature({input_name: speech_inputs} , tensor_type="np" ) UpperCAmelCase : str = processed_features[input_name] if len(batch_features_input.shape ) < 3: UpperCAmelCase : Tuple = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_torch def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = self.feat_extract_tester.prepare_inputs_for_common(equal_length=snake_case ) UpperCAmelCase : Dict = self.feature_extraction_class(**self.feat_extract_dict ) UpperCAmelCase : int = feat_extract.model_input_names[0] UpperCAmelCase : str = BatchFeature({input_name: speech_inputs} , tensor_type="pt" ) UpperCAmelCase : List[Any] = processed_features[input_name] if len(batch_features_input.shape ) < 3: UpperCAmelCase : Any = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_tf def A_ ( self ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=snake_case ) UpperCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_dict ) UpperCAmelCase : Union[str, Any] = feat_extract.model_input_names[0] UpperCAmelCase : str = BatchFeature({input_name: speech_inputs} , tensor_type="tf" ) UpperCAmelCase : Dict = processed_features[input_name] if len(batch_features_input.shape ) < 3: UpperCAmelCase : Tuple = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) def A_ ( self , snake_case=False ): '''simple docstring''' def _inputs_have_equal_length(snake_case ): UpperCAmelCase : Dict = len(input[0] ) for input_slice in input[1:]: if len(snake_case ) != length: return False return True def _inputs_are_equal(snake_case , snake_case ): if len(snake_case ) != len(snake_case ): return False for input_slice_a, input_slice_a in zip(snake_case , snake_case ): if not np.allclose(np.asarray(snake_case ) , np.asarray(snake_case ) , atol=1e-3 ): return False return True UpperCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_dict ) UpperCAmelCase : Optional[int] = self.feat_extract_tester.prepare_inputs_for_common(numpify=snake_case ) UpperCAmelCase : List[Any] = feat_extract.model_input_names[0] UpperCAmelCase : Tuple = BatchFeature({input_name: speech_inputs} ) UpperCAmelCase : str = self.feat_extract_tester.seq_length_diff UpperCAmelCase : str = self.feat_extract_tester.max_seq_length + pad_diff UpperCAmelCase : int = self.feat_extract_tester.min_seq_length UpperCAmelCase : Dict = self.feat_extract_tester.batch_size UpperCAmelCase : Any = self.feat_extract_tester.feature_size # test padding for List[int] + numpy UpperCAmelCase : Union[str, Any] = feat_extract.pad(snake_case , padding=snake_case ) UpperCAmelCase : int = input_a[input_name] UpperCAmelCase : Any = feat_extract.pad(snake_case , padding="longest" ) UpperCAmelCase : Dict = input_a[input_name] UpperCAmelCase : Union[str, Any] = feat_extract.pad(snake_case , padding="max_length" , max_length=len(speech_inputs[-1] ) ) UpperCAmelCase : int = input_a[input_name] UpperCAmelCase : Optional[int] = feat_extract.pad(snake_case , padding="longest" , return_tensors="np" ) UpperCAmelCase : Any = input_a[input_name] # max_length parameter has to be provided when setting `padding="max_length"` with self.assertRaises(snake_case ): feat_extract.pad(snake_case , padding="max_length" )[input_name] UpperCAmelCase : Dict = feat_extract.pad( snake_case , padding="max_length" , max_length=snake_case , return_tensors="np" ) UpperCAmelCase : Tuple = input_a[input_name] self.assertFalse(_inputs_have_equal_length(snake_case ) ) self.assertTrue(_inputs_have_equal_length(snake_case ) ) self.assertTrue(_inputs_have_equal_length(snake_case ) ) self.assertTrue(_inputs_are_equal(snake_case , snake_case ) ) self.assertTrue(len(input_a[0] ) == pad_min_length ) self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff ) self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) ) self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size ) # test padding for `pad_to_multiple_of` for List[int] + numpy UpperCAmelCase : Tuple = feat_extract.pad(snake_case , pad_to_multiple_of=1_0 ) UpperCAmelCase : str = input_a[input_name] UpperCAmelCase : str = feat_extract.pad(snake_case , padding="longest" , pad_to_multiple_of=1_0 ) UpperCAmelCase : str = input_a[input_name] UpperCAmelCase : List[str] = feat_extract.pad( snake_case , padding="max_length" , pad_to_multiple_of=1_0 , max_length=snake_case ) UpperCAmelCase : str = input_a[input_name] UpperCAmelCase : Union[str, Any] = feat_extract.pad( snake_case , padding="max_length" , pad_to_multiple_of=1_0 , max_length=snake_case , return_tensors="np" , ) UpperCAmelCase : Union[str, Any] = input_a[input_name] self.assertTrue(all(len(snake_case ) % 1_0 == 0 for x in input_a ) ) self.assertTrue(_inputs_are_equal(snake_case , snake_case ) ) UpperCAmelCase : Any = pad_max_length if pad_max_length % 1_0 == 0 else (pad_max_length // 1_0 + 1) * 1_0 self.assertTrue(all(len(snake_case ) == expected_mult_pad_length for x in input_a ) ) self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == feature_size ) # Check padding value is correct UpperCAmelCase : Dict = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum() self.assertTrue( abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 ) self.assertTrue( abs( np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) ) < 1e-3 ) self.assertTrue( abs( np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) ) < 1e-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) ) < 1e-3 ) def A_ ( self , snake_case=False ): '''simple docstring''' def _inputs_have_equal_length(snake_case ): UpperCAmelCase : int = len(input[0] ) for input_slice in input[1:]: if len(snake_case ) != length: return False return True def _inputs_are_equal(snake_case , snake_case ): if len(snake_case ) != len(snake_case ): return False for input_slice_a, input_slice_a in zip(snake_case , snake_case ): if not np.allclose(np.asarray(snake_case ) , np.asarray(snake_case ) , atol=1e-3 ): return False return True UpperCAmelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_dict ) UpperCAmelCase : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common(numpify=snake_case ) UpperCAmelCase : Union[str, Any] = feat_extract.model_input_names[0] UpperCAmelCase : Optional[Any] = BatchFeature({input_name: speech_inputs} ) # truncate to smallest UpperCAmelCase : Union[str, Any] = feat_extract.pad( snake_case , padding="max_length" , max_length=len(speech_inputs[0] ) , truncation=snake_case ) UpperCAmelCase : Optional[int] = input_a[input_name] UpperCAmelCase : int = feat_extract.pad(snake_case , padding="max_length" , max_length=len(speech_inputs[0] ) ) UpperCAmelCase : Any = input_a[input_name] self.assertTrue(_inputs_have_equal_length(snake_case ) ) self.assertFalse(_inputs_have_equal_length(snake_case ) ) # truncate to smallest with np UpperCAmelCase : Tuple = feat_extract.pad( snake_case , padding="max_length" , max_length=len(speech_inputs[0] ) , return_tensors="np" , truncation=snake_case , ) UpperCAmelCase : List[Any] = input_a[input_name] UpperCAmelCase : List[Any] = feat_extract.pad( snake_case , padding="max_length" , max_length=len(speech_inputs[0] ) , return_tensors="np" ) UpperCAmelCase : Dict = input_a[input_name] self.assertTrue(_inputs_have_equal_length(snake_case ) ) self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(snake_case ) ) # truncate to middle UpperCAmelCase : List[str] = feat_extract.pad( snake_case , padding="max_length" , max_length=len(speech_inputs[1] ) , truncation=snake_case , return_tensors="np" , ) UpperCAmelCase : Optional[Any] = input_a[input_name] UpperCAmelCase : Union[str, Any] = feat_extract.pad( snake_case , padding="max_length" , max_length=len(speech_inputs[1] ) , truncation=snake_case ) UpperCAmelCase : List[Any] = input_a[input_name] UpperCAmelCase : Dict = feat_extract.pad( snake_case , padding="max_length" , max_length=len(speech_inputs[1] ) , return_tensors="np" ) UpperCAmelCase : Tuple = input_a[input_name] self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) ) self.assertTrue(_inputs_have_equal_length(snake_case ) ) self.assertTrue(_inputs_have_equal_length(snake_case ) ) self.assertTrue(_inputs_are_equal(snake_case , snake_case ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(snake_case ) ) self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) ) # padding has to be max_length when setting `truncation=True` with self.assertRaises(snake_case ): feat_extract.pad(snake_case , truncation=snake_case )[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(snake_case ): feat_extract.pad(snake_case , padding="longest" , truncation=snake_case )[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(snake_case ): feat_extract.pad(snake_case , padding="longest" , truncation=snake_case )[input_name] # max_length parameter has to be provided when setting `truncation=True` and padding="max_length" with self.assertRaises(snake_case ): feat_extract.pad(snake_case , padding="max_length" , truncation=snake_case )[input_name] # test truncation for `pad_to_multiple_of` for List[int] + numpy UpperCAmelCase : List[Any] = 1_2 UpperCAmelCase : Optional[int] = feat_extract.pad( snake_case , padding="max_length" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=snake_case , truncation=snake_case , ) UpperCAmelCase : Optional[int] = input_a[input_name] UpperCAmelCase : Optional[int] = feat_extract.pad( snake_case , padding="max_length" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=snake_case , ) UpperCAmelCase : Union[str, Any] = input_a[input_name] # retrieve expected_length as multiple of pad_to_multiple_of UpperCAmelCase : Any = len(speech_inputs[0] ) if expected_length % pad_to_multiple_of != 0: UpperCAmelCase : Optional[int] = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of self.assertTrue(len(input_a[0] ) == expected_length ) self.assertTrue(_inputs_have_equal_length(snake_case ) ) self.assertFalse(_inputs_have_equal_length(snake_case ) ) def A_ ( self ): '''simple docstring''' self._check_padding(numpify=snake_case ) def A_ ( self ): '''simple docstring''' self._check_padding(numpify=snake_case ) def A_ ( self ): '''simple docstring''' self._check_truncation(numpify=snake_case ) def A_ ( self ): '''simple docstring''' self._check_truncation(numpify=snake_case ) @require_torch def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = self.feature_extraction_class(**self.feat_extract_dict ) UpperCAmelCase : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common() UpperCAmelCase : int = feat_extract.model_input_names[0] UpperCAmelCase : Optional[Any] = BatchFeature({input_name: speech_inputs} ) UpperCAmelCase : int = feat_extract.pad(snake_case , padding="longest" , return_tensors="np" )[input_name] UpperCAmelCase : str = feat_extract.pad(snake_case , padding="longest" , return_tensors="pt" )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 ) @require_tf def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_dict ) UpperCAmelCase : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common() UpperCAmelCase : Any = feat_extract.model_input_names[0] UpperCAmelCase : int = BatchFeature({input_name: speech_inputs} ) UpperCAmelCase : int = feat_extract.pad(snake_case , padding="longest" , return_tensors="np" )[input_name] UpperCAmelCase : Dict = feat_extract.pad(snake_case , padding="longest" , return_tensors="tf" )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = self.feat_extract_dict UpperCAmelCase : Optional[int] = True UpperCAmelCase : Union[str, Any] = self.feature_extraction_class(**snake_case ) UpperCAmelCase : Any = self.feat_extract_tester.prepare_inputs_for_common() UpperCAmelCase : List[str] = [len(snake_case ) for x in speech_inputs] UpperCAmelCase : Any = feat_extract.model_input_names[0] UpperCAmelCase : Optional[Any] = BatchFeature({input_name: speech_inputs} ) UpperCAmelCase : int = feat_extract.pad(snake_case , padding="longest" , return_tensors="np" ) self.assertIn("attention_mask" , snake_case ) self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) ) self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = self.feat_extract_dict UpperCAmelCase : Optional[Any] = True UpperCAmelCase : str = self.feature_extraction_class(**snake_case ) UpperCAmelCase : Any = self.feat_extract_tester.prepare_inputs_for_common() UpperCAmelCase : Optional[int] = [len(snake_case ) for x in speech_inputs] UpperCAmelCase : str = feat_extract.model_input_names[0] UpperCAmelCase : Optional[Any] = BatchFeature({input_name: speech_inputs} ) UpperCAmelCase : Optional[int] = min(snake_case ) UpperCAmelCase : Any = feat_extract.pad( snake_case , padding="max_length" , max_length=snake_case , truncation=snake_case , return_tensors="np" ) self.assertIn("attention_mask" , snake_case ) self.assertListEqual( list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
311
'''simple docstring''' import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available from transformers.models.gpta.tokenization_gpta import GPTaTokenizer from transformers.testing_utils import require_keras_nlp, require_tf, slow if is_tf_available(): import tensorflow as tf if is_keras_nlp_available(): from transformers.models.gpta import TFGPTaTokenizer a : Tuple = ["gpt2"] a : Dict = "gpt2" if is_tf_available(): class UpperCamelCase__ ( tf.Module ): """simple docstring""" def __init__( self , snake_case ): '''simple docstring''' super().__init__() UpperCAmelCase : Tuple = tokenizer UpperCAmelCase : List[str] = AutoConfig.from_pretrained(snake_case ) UpperCAmelCase : int = TFGPTaLMHeadModel.from_config(snake_case ) @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="text" ),) ) def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = self.tokenizer(snake_case ) UpperCAmelCase : Optional[int] = tokenized["input_ids"].to_tensor() UpperCAmelCase : Optional[int] = tf.cast(input_ids_dense > 0 , tf.intaa ) # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN]) UpperCAmelCase : List[Any] = self.model(input_ids=snake_case , attention_mask=snake_case )["logits"] return outputs @require_tf @require_keras_nlp class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self ): '''simple docstring''' super().setUp() UpperCAmelCase : Any = [GPTaTokenizer.from_pretrained(snake_case ) for checkpoint in (TOKENIZER_CHECKPOINTS)] UpperCAmelCase : Optional[Any] = [TFGPTaTokenizer.from_pretrained(snake_case ) for checkpoint in TOKENIZER_CHECKPOINTS] assert len(self.tokenizers ) == len(self.tf_tokenizers ) UpperCAmelCase : Tuple = [ "This is a straightforward English test sentence.", "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.", "Now we're going to add some Chinese: 一 二 三 一二三", "And some much more rare Chinese: 齉 堃 齉堃", "Je vais aussi écrire en français pour tester les accents", "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ", ] UpperCAmelCase : Optional[Any] = list(zip(self.test_sentences , self.test_sentences[::-1] ) ) def A_ ( self ): '''simple docstring''' for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ): for test_inputs in self.test_sentences: UpperCAmelCase : List[Any] = tokenizer([test_inputs] , return_tensors="tf" ) UpperCAmelCase : Any = tf_tokenizer([test_inputs] ) for key in python_outputs.keys(): # convert them to numpy to avoid messing with ragged tensors UpperCAmelCase : Dict = python_outputs[key].numpy() UpperCAmelCase : List[str] = tf_outputs[key].numpy() self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) ) self.assertTrue(tf.reduce_all(tf.cast(snake_case , tf.intaa ) == tf_outputs_values ) ) @slow def A_ ( self ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: UpperCAmelCase : Optional[Any] = tf.function(snake_case ) for test_inputs in self.test_sentences: UpperCAmelCase : List[str] = tf.constant(snake_case ) UpperCAmelCase : Dict = compiled_tokenizer(snake_case ) UpperCAmelCase : Union[str, Any] = tf_tokenizer(snake_case ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def A_ ( self ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: UpperCAmelCase : int = ModelToSave(tokenizer=snake_case ) UpperCAmelCase : Tuple = tf.convert_to_tensor([self.test_sentences[0]] ) UpperCAmelCase : str = model.serving(snake_case ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: UpperCAmelCase : Optional[int] = Path(snake_case ) / "saved.model" tf.saved_model.save(snake_case , snake_case , signatures={"serving_default": model.serving} ) UpperCAmelCase : int = tf.saved_model.load(snake_case ) UpperCAmelCase : str = loaded_model.signatures["serving_default"](snake_case )["output_0"] # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertTrue(tf.reduce_all(out == loaded_output ) ) @slow def A_ ( self ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: UpperCAmelCase : Any = tf.convert_to_tensor([self.test_sentences[0]] ) UpperCAmelCase : Tuple = tf_tokenizer(snake_case ) # Build model with some sample inputs UpperCAmelCase : Union[str, Any] = tf_tokenizer.get_config() UpperCAmelCase : str = TFGPTaTokenizer.from_config(snake_case ) UpperCAmelCase : Tuple = model_from_config(snake_case ) for key in from_config_output.keys(): self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) ) @slow def A_ ( self ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: # for the test to run UpperCAmelCase : List[str] = 1_2_3_1_2_3 for max_length in [3, 5, 1_0_2_4]: UpperCAmelCase : Any = tf.convert_to_tensor([self.test_sentences[0]] ) UpperCAmelCase : Tuple = tf_tokenizer(snake_case , max_length=snake_case ) UpperCAmelCase : Union[str, Any] = out["input_ids"].numpy().shape[1] assert out_length == max_length
311
1
'''simple docstring''' from __future__ import annotations a : Tuple = { "A": ["B", "C", "E"], "B": ["A", "D", "E"], "C": ["A", "F", "G"], "D": ["B"], "E": ["A", "B", "D"], "F": ["C"], "G": ["C"], } class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : List[Any] = graph # mapping node to its parent in resulting breadth first tree UpperCAmelCase : dict[str, str | None] = {} UpperCAmelCase : Optional[Any] = source_vertex def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = {self.source_vertex} UpperCAmelCase : int = None UpperCAmelCase : Any = [self.source_vertex] # first in first out queue while queue: UpperCAmelCase : List[str] = queue.pop(0 ) for adjacent_vertex in self.graph[vertex]: if adjacent_vertex not in visited: visited.add(snake_case ) UpperCAmelCase : Dict = vertex queue.append(snake_case ) def A_ ( self , snake_case ): '''simple docstring''' if target_vertex == self.source_vertex: return self.source_vertex UpperCAmelCase : int = self.parent.get(snake_case ) if target_vertex_parent is None: UpperCAmelCase : Optional[int] = ( f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}" ) raise ValueError(snake_case ) return self.shortest_path(snake_case ) + f"->{target_vertex}" if __name__ == "__main__": a : Tuple = Graph(graph, "G") g.breath_first_search() print(g.shortest_path("D")) print(g.shortest_path("G")) print(g.shortest_path("Foo"))
311
'''simple docstring''' import argparse from collections import defaultdict import yaml a : str = "docs/source/en/_toctree.yml" def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = defaultdict(__magic_name__ ) for doc in model_doc: counts[doc["local"]] += 1 UpperCAmelCase : List[Any] = [key for key, value in counts.items() if value > 1] UpperCAmelCase : Dict = [] for duplicate_key in duplicates: UpperCAmelCase : Union[str, Any] = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key} ) if len(__magic_name__ ) > 1: raise ValueError( F"{duplicate_key} is present several times in the documentation table of content at " "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the " "others." ) # Only add this once new_doc.append({"local": duplicate_key, "title": titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1] ) # Sort return sorted(__magic_name__ , key=lambda __magic_name__ : s["title"].lower() ) def lowercase ( __magic_name__=False ): '''simple docstring''' with open(__magic_name__ , encoding="utf-8" ) as f: UpperCAmelCase : Any = yaml.safe_load(f.read() ) # Get to the API doc UpperCAmelCase : Optional[int] = 0 while content[api_idx]["title"] != "API": api_idx += 1 UpperCAmelCase : Union[str, Any] = content[api_idx]["sections"] # Then to the model doc UpperCAmelCase : Any = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 UpperCAmelCase : str = api_doc[model_idx]["sections"] UpperCAmelCase : Any = [(idx, section) for idx, section in enumerate(__magic_name__ ) if "sections" in section] UpperCAmelCase : Optional[int] = False for idx, modality_doc in modalities_docs: UpperCAmelCase : int = modality_doc["sections"] UpperCAmelCase : int = clean_model_doc_toc(__magic_name__ ) if old_modality_doc != new_modality_doc: UpperCAmelCase : int = True if overwrite: UpperCAmelCase : Dict = new_modality_doc if diff: if overwrite: UpperCAmelCase : Any = model_doc UpperCAmelCase : Any = api_doc with open(__magic_name__ , "w" , encoding="utf-8" ) as f: f.write(yaml.dump(__magic_name__ , allow_unicode=__magic_name__ ) ) else: raise ValueError( "The model doc part of the table of content is not properly sorted, run `make style` to fix this." ) if __name__ == "__main__": a : Optional[Any] = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") a : Optional[Any] = parser.parse_args() check_model_doc(args.fix_and_overwrite)
311
1
'''simple docstring''' import re def lowercase ( __magic_name__ ): '''simple docstring''' return [char.split() for char in re.split(R"[^ a-z A-Z 0-9 \s]" , str_ )] def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Optional[Any] = split_input(str_ ) return "".join( ["".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] ) def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' try: UpperCAmelCase : Optional[int] = split_input(__magic_name__ ) if upper: UpperCAmelCase : str = "".join( [ separator.join([char.upper() for char in sub_str] ) for sub_str in string_split ] ) else: UpperCAmelCase : Union[str, Any] = "".join( [ separator.join([char.lower() for char in sub_str] ) for sub_str in string_split ] ) return res_str except IndexError: return "not valid string" def lowercase ( __magic_name__ ): '''simple docstring''' return to_simple_case(__magic_name__ ) def lowercase ( __magic_name__ ): '''simple docstring''' try: UpperCAmelCase : List[str] = to_simple_case(__magic_name__ ) return res_str[0].lower() + res_str[1:] except IndexError: return "not valid string" def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' return to_complex_case(__magic_name__ , __magic_name__ , "_" ) def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' return to_complex_case(__magic_name__ , __magic_name__ , "-" ) if __name__ == "__main__": __import__("doctest").testmod()
311
'''simple docstring''' from datetime import datetime import matplotlib.pyplot as plt import torch def lowercase ( __magic_name__ ): '''simple docstring''' for param in module.parameters(): UpperCAmelCase : Any = False def lowercase ( ): '''simple docstring''' UpperCAmelCase : int = "cuda" if torch.cuda.is_available() else "cpu" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): UpperCAmelCase : int = "mps" if device == "mps": print( "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch" " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues" " with generations." ) return device def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : str = plt.imshow(__magic_name__ ) fig.axes.get_xaxis().set_visible(__magic_name__ ) fig.axes.get_yaxis().set_visible(__magic_name__ ) plt.show() def lowercase ( ): '''simple docstring''' UpperCAmelCase : str = datetime.now() UpperCAmelCase : Tuple = current_time.strftime("%H:%M:%S" ) return timestamp
311
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) if is_sentencepiece_available(): from ..ta.tokenization_ta import TaTokenizer else: from ...utils.dummy_sentencepiece_objects import TaTokenizer a : str = TaTokenizer if is_tokenizers_available(): from ..ta.tokenization_ta_fast import TaTokenizerFast else: from ...utils.dummy_tokenizers_objects import TaTokenizerFast a : Dict = TaTokenizerFast a : List[Any] = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : str = [ "MT5EncoderModel", "MT5ForConditionalGeneration", "MT5ForQuestionAnswering", "MT5Model", "MT5PreTrainedModel", "MT5Stack", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Optional[int] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Any = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"] if TYPE_CHECKING: from .configuration_mta import MTaConfig, MTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mta import ( MTaEncoderModel, MTaForConditionalGeneration, MTaForQuestionAnswering, MTaModel, MTaPreTrainedModel, MTaStack, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel else: import sys a : List[Any] = _LazyModule( __name__, globals()["__file__"], _import_structure, extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast}, module_spec=__spec__, )
311
'''simple docstring''' import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import ( SeqaSeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) a : str = getLogger(__name__) def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = 8 , __magic_name__ = 1024 , __magic_name__="val" , __magic_name__=None , __magic_name__=False , __magic_name__="summarization" , __magic_name__=None , __magic_name__=1 , __magic_name__ = None , __magic_name__="" , **__magic_name__ , ): '''simple docstring''' UpperCAmelCase : List[Any] = str(__magic_name__ ) assert local_rank is not None torch.distributed.init_process_group(backend="nccl" , rank=__magic_name__ ) UpperCAmelCase : List[str] = Path(__magic_name__ ) UpperCAmelCase : Dict = save_dir.joinpath(F"rank_{local_rank}_output.json" ) torch.cuda.set_device(__magic_name__ ) UpperCAmelCase : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(__magic_name__ ).cuda() if fpaa: UpperCAmelCase : int = model.half() # determine if we need to increase num_beams use_task_specific_params(__magic_name__ , __magic_name__ ) # update config with task specific params UpperCAmelCase : Dict = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk? if num_return_sequences > num_beams: UpperCAmelCase : Optional[Any] = num_return_sequences UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(__magic_name__ ) logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type. if max_source_length is None: UpperCAmelCase : Any = tokenizer.model_max_length if prefix is None: UpperCAmelCase : Tuple = prefix or getattr(model.config , "prefix" , "" ) or "" UpperCAmelCase : Dict = SeqaSeqDataset( __magic_name__ , __magic_name__ , __magic_name__ , max_target_length=1024 , type_path=__magic_name__ , n_obs=__magic_name__ , prefix=__magic_name__ , **__magic_name__ , ) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. UpperCAmelCase : int = ds.make_sortish_sampler(__magic_name__ , distributed=__magic_name__ , add_extra_examples=__magic_name__ , shuffle=__magic_name__ ) UpperCAmelCase : List[Any] = DataLoader(__magic_name__ , sampler=__magic_name__ , batch_size=__magic_name__ , collate_fn=ds.collate_fn ) UpperCAmelCase : Any = [] for batch in tqdm(__magic_name__ ): UpperCAmelCase : List[Any] = model.generate( input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=__magic_name__ , num_beams=__magic_name__ , **__magic_name__ , ) UpperCAmelCase : Optional[int] = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ ) UpperCAmelCase : int = batch["ids"] if num_return_sequences > 1: UpperCAmelCase : List[Any] = chunks(__magic_name__ , __magic_name__ ) # batch size chunks, each of size num_return_seq for i, pred in enumerate(__magic_name__ ): results.append({"pred": pred, "id": ids[i].item()} ) save_json(__magic_name__ , __magic_name__ ) return results, sampler.num_replicas def lowercase ( ): '''simple docstring''' UpperCAmelCase : str = argparse.ArgumentParser( epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" ) parser.add_argument("--data_dir" , type=__magic_name__ , help="like cnn_dm/test.source" ) parser.add_argument( "--model_name" , type=__magic_name__ , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , ) parser.add_argument("--save_dir" , type=__magic_name__ , help="where to save" , default="tmp_gen" ) parser.add_argument("--max_source_length" , type=__magic_name__ , default=__magic_name__ ) parser.add_argument( "--type_path" , type=__magic_name__ , default="test" , help="which subset to evaluate typically train/val/test" ) parser.add_argument("--task" , type=__magic_name__ , default="summarization" , help="used for task_specific_params + metrics" ) parser.add_argument("--bs" , type=__magic_name__ , default=8 , required=__magic_name__ , help="batch size" ) parser.add_argument( "--local_rank" , type=__magic_name__ , default=-1 , required=__magic_name__ , help="should be passed by distributed.launch" ) parser.add_argument( "--n_obs" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ , help="How many observations. Defaults to all." ) parser.add_argument( "--num_return_sequences" , type=__magic_name__ , default=1 , required=__magic_name__ , help="How many sequences to return" ) parser.add_argument( "--sync_timeout" , type=__magic_name__ , default=600 , required=__magic_name__ , help="How long should master process wait for other processes to finish." , ) parser.add_argument("--src_lang" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ ) parser.add_argument("--tgt_lang" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ ) parser.add_argument( "--prefix" , type=__magic_name__ , required=__magic_name__ , default=__magic_name__ , help="will be added to the begininng of src examples" ) parser.add_argument("--fp16" , action="store_true" ) parser.add_argument("--debug" , action="store_true" ) UpperCAmelCase : Union[str, Any] = time.time() UpperCAmelCase , UpperCAmelCase : Dict = parser.parse_known_args() UpperCAmelCase : Tuple = parse_numeric_n_bool_cl_kwargs(__magic_name__ ) if generate_kwargs and args.local_rank <= 0: print(F"parsed the following generate kwargs: {generate_kwargs}" ) UpperCAmelCase : Union[str, Any] = Path(args.save_dir + "_tmp" ) Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) # this handles locking. UpperCAmelCase : List[Any] = list(json_save_dir.glob("rank_*.json" ) ) if intermediate_files: raise ValueError(F"Found files at {json_save_dir} please move or remove them." ) # In theory, a node could finish and save before another node hits this. If this happens, we can address later. UpperCAmelCase : Optional[Any] = {} if args.src_lang is not None: UpperCAmelCase : List[str] = args.src_lang if args.tgt_lang is not None: UpperCAmelCase : Dict = args.tgt_lang Path(args.save_dir ).mkdir(exist_ok=__magic_name__ ) UpperCAmelCase , UpperCAmelCase : str = eval_data_dir( args.data_dir , __magic_name__ , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=__magic_name__ , **__magic_name__ , ) if args.local_rank <= 0: UpperCAmelCase : List[str] = Path(args.save_dir ) save_dir.mkdir(exist_ok=__magic_name__ ) UpperCAmelCase : str = gather_results_from_each_node(__magic_name__ , __magic_name__ , args.sync_timeout ) UpperCAmelCase : Dict = combine_partial_results(__magic_name__ ) if args.num_return_sequences > 1: UpperCAmelCase : int = save_dir.joinpath("pseudolabel_results.json" ) print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" ) save_json(__magic_name__ , __magic_name__ ) return UpperCAmelCase : Dict = Path(args.data_dir ).joinpath(args.type_path + ".target" ) with open(__magic_name__ ) as f: UpperCAmelCase : Dict = [x.rstrip() for x in f.readlines()][: len(__magic_name__ )] # Calculate metrics, save metrics, and save _generations.txt UpperCAmelCase : Optional[int] = "translation" in args.task UpperCAmelCase : str = calculate_bleu if calc_bleu else calculate_rouge UpperCAmelCase : Tuple = "bleu" if calc_bleu else "rouge" UpperCAmelCase : Dict = score_fn(__magic_name__ , __magic_name__ ) UpperCAmelCase : Any = len(__magic_name__ ) UpperCAmelCase : Union[str, Any] = time.time() - start_time UpperCAmelCase : Dict = round(runtime / metrics["n_obs"] , 4 ) UpperCAmelCase : Optional[Any] = num_replicas # TODO(@stas00): add whatever metadata to metrics UpperCAmelCase : Dict = save_dir.joinpath(F"{args.type_path}_{metric_name}.json" ) save_json(__magic_name__ , __magic_name__ , indent=__magic_name__ ) print(__magic_name__ ) write_txt_file(__magic_name__ , save_dir.joinpath(F"{args.type_path}_generations.txt" ) ) if args.debug: write_txt_file(__magic_name__ , save_dir.joinpath(F"{args.type_path}.target" ) ) else: shutil.rmtree(__magic_name__ ) def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Tuple = [] for partial_result in partial_results: records.extend(__magic_name__ ) UpperCAmelCase : Optional[Any] = sorted(__magic_name__ , key=lambda __magic_name__ : x["id"] ) UpperCAmelCase : List[Any] = [x["pred"] for x in records] return preds def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = time.time() logger.info("waiting for all nodes to finish" ) UpperCAmelCase : Union[str, Any] = None while (time.time() - start_wait) < timeout: UpperCAmelCase : Dict = list(save_dir.glob("rank_*.json" ) ) if len(__magic_name__ ) < num_replicas: continue try: # make sure all json files are fully saved UpperCAmelCase : List[str] = lmap(__magic_name__ , __magic_name__ ) return json_data except JSONDecodeError: continue else: raise TimeoutError("Rank 0 gave up on waiting for other processes" ) # Unreachable if __name__ == "__main__": # Usage for MT: run_generate()
311
1
'''simple docstring''' import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) a : Any = logging.getLogger(__name__) def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = git.Repo(search_parent_directories=__magic_name__ ) UpperCAmelCase : int = { "repo_id": str(__magic_name__ ), "repo_sha": str(repo.head.object.hexsha ), "repo_branch": str(repo.active_branch ), } with open(os.path.join(__magic_name__ , "git_log.json" ) , "w" ) as f: json.dump(__magic_name__ , __magic_name__ , indent=4 ) def lowercase ( __magic_name__ ): '''simple docstring''' if params.n_gpu <= 0: UpperCAmelCase : List[str] = 0 UpperCAmelCase : List[str] = -1 UpperCAmelCase : Tuple = True UpperCAmelCase : List[str] = False return assert torch.cuda.is_available() logger.info("Initializing GPUs" ) if params.n_gpu > 1: assert params.local_rank != -1 UpperCAmelCase : Union[str, Any] = int(os.environ["WORLD_SIZE"] ) UpperCAmelCase : str = int(os.environ["N_GPU_NODE"] ) UpperCAmelCase : Dict = int(os.environ["RANK"] ) # number of nodes / node ID UpperCAmelCase : str = params.world_size // params.n_gpu_per_node UpperCAmelCase : Optional[int] = params.global_rank // params.n_gpu_per_node UpperCAmelCase : Tuple = True assert params.n_nodes == int(os.environ["N_NODES"] ) assert params.node_id == int(os.environ["NODE_RANK"] ) # local job (single GPU) else: assert params.local_rank == -1 UpperCAmelCase : List[str] = 1 UpperCAmelCase : Any = 0 UpperCAmelCase : Dict = 0 UpperCAmelCase : int = 0 UpperCAmelCase : List[Any] = 1 UpperCAmelCase : Any = 1 UpperCAmelCase : str = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode UpperCAmelCase : List[str] = params.node_id == 0 and params.local_rank == 0 UpperCAmelCase : Optional[Any] = params.n_nodes > 1 # summary UpperCAmelCase : str = F"--- Global rank: {params.global_rank} - " logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes ) logger.info(PREFIX + "Node ID : %i" % params.node_id ) logger.info(PREFIX + "Local rank : %i" % params.local_rank ) logger.info(PREFIX + "World size : %i" % params.world_size ) logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node ) logger.info(PREFIX + "Master : %s" % str(params.is_master ) ) logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node ) ) logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu ) ) logger.info(PREFIX + "Hostname : %s" % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info("Initializing PyTorch distributed" ) torch.distributed.init_process_group( init_method="env://" , backend="nccl" , ) def lowercase ( __magic_name__ ): '''simple docstring''' np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
311
'''simple docstring''' import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() a : List[str] = logging.get_logger(__name__) a : Optional[Any] = ["model.decoder.embed_positions.weights"] def lowercase ( __magic_name__ ): '''simple docstring''' if "emb" in name: UpperCAmelCase : str = name.replace("emb" , "model.decoder.embed_tokens" ) if "transformer" in name: UpperCAmelCase : List[str] = name.replace("transformer" , "model.decoder" ) if "cross_attention" in name: UpperCAmelCase : int = name.replace("cross_attention" , "encoder_attn" ) if "linear1" in name: UpperCAmelCase : List[Any] = name.replace("linear1" , "fc1" ) if "linear2" in name: UpperCAmelCase : int = name.replace("linear2" , "fc2" ) if "norm1" in name: UpperCAmelCase : Dict = name.replace("norm1" , "self_attn_layer_norm" ) if "norm_cross" in name: UpperCAmelCase : Any = name.replace("norm_cross" , "encoder_attn_layer_norm" ) if "norm2" in name: UpperCAmelCase : Union[str, Any] = name.replace("norm2" , "final_layer_norm" ) if "out_norm" in name: UpperCAmelCase : Dict = name.replace("out_norm" , "model.decoder.layer_norm" ) if "linears" in name: UpperCAmelCase : List[Any] = name.replace("linears" , "lm_heads" ) if "condition_provider.conditioners.description.output_proj" in name: UpperCAmelCase : Any = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" ) return name def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Any = list(state_dict.keys() ) UpperCAmelCase : List[Any] = {} for key in keys: UpperCAmelCase : Any = state_dict.pop(__magic_name__ ) UpperCAmelCase : str = rename_keys(__magic_name__ ) if "in_proj_weight" in key: # split fused qkv proj UpperCAmelCase : Optional[int] = val[:hidden_size, :] UpperCAmelCase : Optional[Any] = val[hidden_size : 2 * hidden_size, :] UpperCAmelCase : Optional[Any] = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: UpperCAmelCase : str = val else: UpperCAmelCase : int = val return state_dict, enc_dec_proj_state_dict def lowercase ( __magic_name__ ): '''simple docstring''' if checkpoint == "small": # default config values UpperCAmelCase : List[Any] = 1024 UpperCAmelCase : Tuple = 24 UpperCAmelCase : Union[str, Any] = 16 elif checkpoint == "medium": UpperCAmelCase : List[Any] = 1536 UpperCAmelCase : Optional[Any] = 48 UpperCAmelCase : List[str] = 24 elif checkpoint == "large": UpperCAmelCase : List[Any] = 2048 UpperCAmelCase : str = 48 UpperCAmelCase : Optional[Any] = 32 else: raise ValueError(F"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." ) UpperCAmelCase : Tuple = MusicgenDecoderConfig( hidden_size=__magic_name__ , ffn_dim=hidden_size * 4 , num_hidden_layers=__magic_name__ , num_attention_heads=__magic_name__ , ) return config @torch.no_grad() def lowercase ( __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__="cpu" ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = MusicGen.get_pretrained(__magic_name__ , device=__magic_name__ ) UpperCAmelCase : List[str] = decoder_config_from_checkpoint(__magic_name__ ) UpperCAmelCase : Dict = fairseq_model.lm.state_dict() UpperCAmelCase , UpperCAmelCase : List[str] = rename_state_dict( __magic_name__ , hidden_size=decoder_config.hidden_size ) UpperCAmelCase : Any = TaEncoderModel.from_pretrained("t5-base" ) UpperCAmelCase : Any = EncodecModel.from_pretrained("facebook/encodec_32khz" ) UpperCAmelCase : int = MusicgenForCausalLM(__magic_name__ ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection UpperCAmelCase , UpperCAmelCase : Optional[int] = decoder.load_state_dict(__magic_name__ , strict=__magic_name__ ) for key in missing_keys.copy(): if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(__magic_name__ ) if len(__magic_name__ ) > 0: raise ValueError(F"Missing key(s) in state_dict: {missing_keys}" ) if len(__magic_name__ ) > 0: raise ValueError(F"Unexpected key(s) in state_dict: {unexpected_keys}" ) # init the composite model UpperCAmelCase : List[Any] = MusicgenForConditionalGeneration(text_encoder=__magic_name__ , audio_encoder=__magic_name__ , decoder=__magic_name__ ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(__magic_name__ ) # check we can do a forward pass UpperCAmelCase : Union[str, Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) UpperCAmelCase : Optional[Any] = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): UpperCAmelCase : str = model(input_ids=__magic_name__ , decoder_input_ids=__magic_name__ ).logits if logits.shape != (8, 1, 2048): raise ValueError("Incorrect shape for logits" ) # now construct the processor UpperCAmelCase : Dict = AutoTokenizer.from_pretrained("t5-base" ) UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" ) UpperCAmelCase : Dict = MusicgenProcessor(feature_extractor=__magic_name__ , tokenizer=__magic_name__ ) # set the appropriate bos/pad token ids UpperCAmelCase : List[Any] = 2048 UpperCAmelCase : Tuple = 2048 # set other default generation config params UpperCAmelCase : Tuple = int(30 * audio_encoder.config.frame_rate ) UpperCAmelCase : str = True UpperCAmelCase : Tuple = 3.0 if pytorch_dump_folder is not None: Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) logger.info(F"Saving model {checkpoint} to {pytorch_dump_folder}" ) model.save_pretrained(__magic_name__ ) processor.save_pretrained(__magic_name__ ) if repo_id: logger.info(F"Pushing model {checkpoint} to {repo_id}" ) model.push_to_hub(__magic_name__ ) processor.push_to_hub(__magic_name__ ) if __name__ == "__main__": a : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint", default="small", type=str, help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.", ) parser.add_argument( "--pytorch_dump_folder", required=True, default=None, type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) parser.add_argument( "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda." ) a : int = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
311
1
'''simple docstring''' def lowercase ( __magic_name__ ): '''simple docstring''' if number < 0: raise ValueError("number must not be negative" ) return number & (number - 1) == 0 if __name__ == "__main__": import doctest doctest.testmod()
311
'''simple docstring''' import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[str] = inspect.getfile(accelerate.test_utils ) UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] ) UpperCAmelCase : Optional[int] = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] ) UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] ) @require_multi_gpu def A_ ( self ): '''simple docstring''' print(f"Found {torch.cuda.device_count()} devices." ) UpperCAmelCase : Any = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case , env=os.environ.copy() ) @require_multi_gpu def A_ ( self ): '''simple docstring''' print(f"Found {torch.cuda.device_count()} devices." ) UpperCAmelCase : Tuple = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path] print(f"Command: {cmd}" ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case , env=os.environ.copy() ) @require_multi_gpu def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case , env=os.environ.copy() ) @require_multi_gpu def A_ ( self ): '''simple docstring''' print(f"Found {torch.cuda.device_count()} devices, using 2 devices only" ) UpperCAmelCase : str = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ): execute_subprocess_async(snake_case , env=os.environ.copy() ) if __name__ == "__main__": a : Union[str, Any] = Accelerator() a : str = (accelerator.state.process_index + 2, 10) a : List[str] = torch.randint(0, 10, shape).to(accelerator.device) a : Optional[int] = "" a : int = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." a : List[Any] = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." a : List[str] = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
311
1
'''simple docstring''' from __future__ import annotations import math def lowercase ( __magic_name__ ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(__magic_name__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True a : Union[str, Any] = [num for num in range(3, 10_00_01, 2) if not is_prime(num)] def lowercase ( __magic_name__ ): '''simple docstring''' if not isinstance(__magic_name__ , __magic_name__ ): raise ValueError("n must be an integer" ) if n <= 0: raise ValueError("n must be >= 0" ) UpperCAmelCase : int = [] for num in range(len(__magic_name__ ) ): UpperCAmelCase : int = 0 while 2 * i * i <= odd_composites[num]: UpperCAmelCase : int = odd_composites[num] - 2 * i * i if is_prime(__magic_name__ ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(__magic_name__ ) == n: return list_nums return [] def lowercase ( ): '''simple docstring''' return compute_nums(1 )[0] if __name__ == "__main__": print(F'{solution() = }')
311
'''simple docstring''' import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class UpperCamelCase__ : """simple docstring""" @staticmethod def A_ ( *snake_case , **snake_case ): '''simple docstring''' pass @is_pipeline_test @require_vision @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def A_ ( self , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : str = pipeline( "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" ) UpperCAmelCase : Union[str, Any] = [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ] return object_detector, examples def A_ ( self , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : List[Any] = object_detector(examples[0] , threshold=0.0 ) UpperCAmelCase : Dict = len(snake_case ) self.assertGreater(snake_case , 0 ) self.assertEqual( snake_case , [ { "score": ANY(snake_case ), "label": ANY(snake_case ), "box": {"xmin": ANY(snake_case ), "ymin": ANY(snake_case ), "xmax": ANY(snake_case ), "ymax": ANY(snake_case )}, } for i in range(snake_case ) ] , ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF" ) def A_ ( self ): '''simple docstring''' pass @require_torch def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = pipeline( "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" ) UpperCAmelCase : Optional[Any] = object_detector( "./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ {"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}}, {"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}}, {"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}}, ] , ) UpperCAmelCase : Tuple = object_detector( [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ] , threshold=0.64 , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ [ {"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}}, {"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}}, {"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}}, ] ] , ) @require_torch @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = pipeline("zero-shot-object-detection" ) UpperCAmelCase : Optional[int] = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}}, ] , ) UpperCAmelCase : Union[str, Any] = object_detector( [ { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, ] , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}}, ], [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}}, ], ] , ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF" ) def A_ ( self ): '''simple docstring''' pass @require_torch @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = 0.2 UpperCAmelCase : Union[str, Any] = pipeline("zero-shot-object-detection" ) UpperCAmelCase : str = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=snake_case , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}}, ] , ) @require_torch @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = 2 UpperCAmelCase : Optional[Any] = pipeline("zero-shot-object-detection" ) UpperCAmelCase : List[str] = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=snake_case , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, ] , )
311
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a : List[str] = { "configuration_time_series_transformer": [ "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimeSeriesTransformerConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Tuple = [ "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TimeSeriesTransformerForPrediction", "TimeSeriesTransformerModel", "TimeSeriesTransformerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys a : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
311
'''simple docstring''' def lowercase ( __magic_name__ ): '''simple docstring''' if number > 0: raise ValueError("input must be a negative integer" ) UpperCAmelCase : List[Any] = len(bin(__magic_name__ )[3:] ) UpperCAmelCase : Optional[Any] = bin(abs(__magic_name__ ) - (1 << binary_number_length) )[3:] UpperCAmelCase : Tuple = ( ( "1" + "0" * (binary_number_length - len(__magic_name__ )) + twos_complement_number ) if number < 0 else "0" ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
311
1
'''simple docstring''' def lowercase ( __magic_name__ ): '''simple docstring''' if not nums: # Makes sure that the list is not empty raise ValueError("List is empty" ) UpperCAmelCase : Union[str, Any] = sum(__magic_name__ ) / len(__magic_name__ ) # Calculate the average return sum(abs(x - average ) for x in nums ) / len(__magic_name__ ) if __name__ == "__main__": import doctest doctest.testmod()
311
'''simple docstring''' from collections import Counter import numpy as np from sklearn import datasets from sklearn.model_selection import train_test_split a : int = datasets.load_iris() a : Union[str, Any] = np.array(data["data"]) a : Optional[Any] = np.array(data["target"]) a : List[Any] = data["target_names"] a , a , a , a : Dict = train_test_split(X, y) def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' return np.linalg.norm(np.array(__magic_name__ ) - np.array(__magic_name__ ) ) def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=5 ): '''simple docstring''' UpperCAmelCase : int = zip(__magic_name__ , __magic_name__ ) # List of distances of all points from the point to be classified UpperCAmelCase : List[Any] = [] for data_point in data: UpperCAmelCase : List[str] = euclidean_distance(data_point[0] , __magic_name__ ) distances.append((distance, data_point[1]) ) # Choosing 'k' points with the least distances. UpperCAmelCase : Union[str, Any] = [i[1] for i in sorted(__magic_name__ )[:k]] # Most commonly occurring class among them # is the class into which the point is classified UpperCAmelCase : List[str] = Counter(__magic_name__ ).most_common(1 )[0][0] return classes[result] if __name__ == "__main__": print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
311
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available a : Optional[Any] = { "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"], "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Dict = [ "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST", "AdaptiveEmbedding", "TransfoXLForSequenceClassification", "TransfoXLLMHeadModel", "TransfoXLModel", "TransfoXLPreTrainedModel", "load_tf_weights_in_transfo_xl", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Optional[int] = [ "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST", "TFAdaptiveEmbedding", "TFTransfoXLForSequenceClassification", "TFTransfoXLLMHeadModel", "TFTransfoXLMainLayer", "TFTransfoXLModel", "TFTransfoXLPreTrainedModel", ] if TYPE_CHECKING: from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_transfo_xl import ( TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, AdaptiveEmbedding, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel, TransfoXLPreTrainedModel, load_tf_weights_in_transfo_xl, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_transfo_xl import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFAdaptiveEmbedding, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLMainLayer, TFTransfoXLModel, TFTransfoXLPreTrainedModel, ) else: import sys a : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
311
'''simple docstring''' def lowercase ( __magic_name__ ): '''simple docstring''' if number < 0: raise ValueError("number must not be negative" ) return number & (number - 1) == 0 if __name__ == "__main__": import doctest doctest.testmod()
311
1
'''simple docstring''' import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version(">=", FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType a : str = get_logger(__name__) def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=0 ): '''simple docstring''' os.makedirs(__magic_name__ , exist_ok=__magic_name__ ) with FSDP.state_dict_type( __magic_name__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): UpperCAmelCase : Dict = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: UpperCAmelCase : Optional[Any] = F"{MODEL_NAME}.bin" if model_index == 0 else F"{MODEL_NAME}_{model_index}.bin" UpperCAmelCase : str = os.path.join(__magic_name__ , __magic_name__ ) if accelerator.process_index == 0: logger.info(F"Saving model to {output_model_file}" ) torch.save(__magic_name__ , __magic_name__ ) logger.info(F"Model saved to {output_model_file}" ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: UpperCAmelCase : str = ( F"{MODEL_NAME}_rank{accelerator.process_index}.bin" if model_index == 0 else F"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin" ) UpperCAmelCase : Any = os.path.join(__magic_name__ , __magic_name__ ) logger.info(F"Saving model to {output_model_file}" ) torch.save(__magic_name__ , __magic_name__ ) logger.info(F"Model saved to {output_model_file}" ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: UpperCAmelCase : Optional[Any] = os.path.join(__magic_name__ , F"{MODEL_NAME}_{model_index}" ) os.makedirs(__magic_name__ , exist_ok=__magic_name__ ) logger.info(F"Saving model to {ckpt_dir}" ) UpperCAmelCase : Dict = {"model": state_dict} dist_cp.save_state_dict( state_dict=__magic_name__ , storage_writer=dist_cp.FileSystemWriter(__magic_name__ ) , planner=DefaultSavePlanner() , ) logger.info(F"Model saved to {ckpt_dir}" ) def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=0 ): '''simple docstring''' accelerator.wait_for_everyone() with FSDP.state_dict_type( __magic_name__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(__magic_name__ ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( "Set the `sync_module_states` flag to `True` so that model states are synced across processes when " "initializing FSDP object" ) return UpperCAmelCase : List[Any] = F"{MODEL_NAME}.bin" if model_index == 0 else F"{MODEL_NAME}_{model_index}.bin" UpperCAmelCase : Dict = os.path.join(__magic_name__ , __magic_name__ ) logger.info(F"Loading model from {input_model_file}" ) UpperCAmelCase : Dict = torch.load(__magic_name__ ) logger.info(F"Model loaded from {input_model_file}" ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: UpperCAmelCase : Any = ( F"{MODEL_NAME}_rank{accelerator.process_index}.bin" if model_index == 0 else F"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin" ) UpperCAmelCase : Union[str, Any] = os.path.join(__magic_name__ , __magic_name__ ) logger.info(F"Loading model from {input_model_file}" ) UpperCAmelCase : Dict = torch.load(__magic_name__ ) logger.info(F"Model loaded from {input_model_file}" ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: UpperCAmelCase : Any = ( os.path.join(__magic_name__ , F"{MODEL_NAME}_{model_index}" ) if F"{MODEL_NAME}" not in input_dir else input_dir ) logger.info(F"Loading model from {ckpt_dir}" ) UpperCAmelCase : List[Any] = {"model": model.state_dict()} dist_cp.load_state_dict( state_dict=__magic_name__ , storage_reader=dist_cp.FileSystemReader(__magic_name__ ) , planner=DefaultLoadPlanner() , ) UpperCAmelCase : Any = state_dict["model"] logger.info(F"Model loaded from {ckpt_dir}" ) model.load_state_dict(__magic_name__ ) def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=0 ): '''simple docstring''' os.makedirs(__magic_name__ , exist_ok=__magic_name__ ) with FSDP.state_dict_type( __magic_name__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): UpperCAmelCase : int = FSDP.optim_state_dict(__magic_name__ , __magic_name__ ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: UpperCAmelCase : Optional[int] = ( F"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else F"{OPTIMIZER_NAME}_{optimizer_index}.bin" ) UpperCAmelCase : List[str] = os.path.join(__magic_name__ , __magic_name__ ) logger.info(F"Saving Optimizer state to {output_optimizer_file}" ) torch.save(__magic_name__ , __magic_name__ ) logger.info(F"Optimizer state saved in {output_optimizer_file}" ) else: UpperCAmelCase : Any = os.path.join(__magic_name__ , F"{OPTIMIZER_NAME}_{optimizer_index}" ) os.makedirs(__magic_name__ , exist_ok=__magic_name__ ) logger.info(F"Saving Optimizer state to {ckpt_dir}" ) dist_cp.save_state_dict( state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(__magic_name__ ) , planner=DefaultSavePlanner() , ) logger.info(F"Optimizer state saved in {ckpt_dir}" ) def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=0 ): '''simple docstring''' accelerator.wait_for_everyone() with FSDP.state_dict_type( __magic_name__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: UpperCAmelCase : Union[str, Any] = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: UpperCAmelCase : Any = ( F"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else F"{OPTIMIZER_NAME}_{optimizer_index}.bin" ) UpperCAmelCase : int = os.path.join(__magic_name__ , __magic_name__ ) logger.info(F"Loading Optimizer state from {input_optimizer_file}" ) UpperCAmelCase : Optional[Any] = torch.load(__magic_name__ ) logger.info(F"Optimizer state loaded from {input_optimizer_file}" ) else: UpperCAmelCase : Tuple = ( os.path.join(__magic_name__ , F"{OPTIMIZER_NAME}_{optimizer_index}" ) if F"{OPTIMIZER_NAME}" not in input_dir else input_dir ) logger.info(F"Loading Optimizer from {ckpt_dir}" ) UpperCAmelCase : int = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(__magic_name__ ) , ) UpperCAmelCase : int = optim_state["optimizer"] logger.info(F"Optimizer loaded from {ckpt_dir}" ) UpperCAmelCase : str = FSDP.optim_state_dict_to_load(__magic_name__ , __magic_name__ , __magic_name__ ) optimizer.load_state_dict(__magic_name__ )
311
'''simple docstring''' import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def lowercase ( __magic_name__ , __magic_name__=10 ): '''simple docstring''' UpperCAmelCase : Tuple = [] for _ in range(__magic_name__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def lowercase ( __magic_name__ , __magic_name__=10 ): '''simple docstring''' UpperCAmelCase : List[str] = [] for step in range(__magic_name__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase : Any = os.path.join(__magic_name__ , "schedule.bin" ) torch.save(scheduler.state_dict() , __magic_name__ ) UpperCAmelCase : Any = torch.load(__magic_name__ ) scheduler.load_state_dict(__magic_name__ ) return lrs @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self , snake_case , snake_case , snake_case ): '''simple docstring''' self.assertEqual(len(snake_case ) , len(snake_case ) ) for a, b in zip(snake_case , snake_case ): self.assertAlmostEqual(snake_case , snake_case , delta=snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case ) UpperCAmelCase : Any = torch.tensor([0.4, 0.2, -0.5] ) UpperCAmelCase : Any = nn.MSELoss() # No warmup, constant schedule, no gradient clipping UpperCAmelCase : List[str] = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 ) for _ in range(1_0_0 ): UpperCAmelCase : List[Any] = criterion(snake_case , snake_case ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case ) UpperCAmelCase : int = torch.tensor([0.4, 0.2, -0.5] ) UpperCAmelCase : str = nn.MSELoss() # No warmup, constant schedule, no gradient clipping UpperCAmelCase : str = Adafactor( params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=snake_case , weight_decay=0.0 , relative_step=snake_case , scale_parameter=snake_case , warmup_init=snake_case , ) for _ in range(1_0_0_0 ): UpperCAmelCase : str = criterion(snake_case , snake_case ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = nn.Linear(50 , 50 ) if is_torch_available() else None SCREAMING_SNAKE_CASE__ : List[Any] = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None SCREAMING_SNAKE_CASE__ : Optional[int] = 10 def A_ ( self , snake_case , snake_case , snake_case , snake_case=None ): '''simple docstring''' self.assertEqual(len(snake_case ) , len(snake_case ) ) for a, b in zip(snake_case , snake_case ): self.assertAlmostEqual(snake_case , snake_case , delta=snake_case , msg=snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : int = {"num_warmup_steps": 2, "num_training_steps": 1_0} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) UpperCAmelCase : int = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {"num_warmup_steps": 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, "num_cycles": 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, "power": 2.0, "lr_end": 1e-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {"num_warmup_steps": 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): UpperCAmelCase , UpperCAmelCase : Any = data UpperCAmelCase : Tuple = scheduler_func(self.optimizer , **snake_case ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) UpperCAmelCase : List[str] = unwrap_schedule(snake_case , self.num_steps ) self.assertListAlmostEqual( snake_case , snake_case , tol=1e-2 , msg=f"failed for {scheduler_func} in normal scheduler" , ) UpperCAmelCase : Optional[Any] = scheduler_func(self.optimizer , **snake_case ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(snake_case ) # wrap to test picklability of the schedule UpperCAmelCase : Tuple = unwrap_and_save_reload_schedule(snake_case , self.num_steps ) self.assertListEqual(snake_case , snake_case , msg=f"failed for {scheduler_func} in save and reload" ) class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case ): '''simple docstring''' UpperCAmelCase : List[str] = fn def __call__( self , *snake_case , **snake_case ): '''simple docstring''' return self.fn(*snake_case , **snake_case ) @classmethod def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : Optional[int] = list(map(self , scheduler.lr_lambdas ) )
311
1
'''simple docstring''' def lowercase ( __magic_name__ ): '''simple docstring''' if a < 0: raise ValueError("Input value must be a positive integer" ) elif isinstance(__magic_name__ , __magic_name__ ): raise TypeError("Input value must be a 'int' type" ) return bin(__magic_name__ ).count("1" ) if __name__ == "__main__": import doctest doctest.testmod()
311
'''simple docstring''' import jax.numpy as jnp from ...utils import logging from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel from .configuration_mta import MTaConfig a : Optional[Any] = logging.get_logger(__name__) a : Tuple = "T5Config" def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Any = jnp.zeros_like(__magic_name__ ) UpperCAmelCase : Optional[int] = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] ) UpperCAmelCase : str = shifted_input_ids.at[:, 0].set(__magic_name__ ) UpperCAmelCase : Any = jnp.where(shifted_input_ids == -100 , __magic_name__ , __magic_name__ ) return shifted_input_ids class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : int = "mt5" SCREAMING_SNAKE_CASE__ : Dict = MTaConfig class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : int = "mt5" SCREAMING_SNAKE_CASE__ : str = MTaConfig class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = "mt5" SCREAMING_SNAKE_CASE__ : str = MTaConfig
311
1
'''simple docstring''' import argparse import json import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : str = SwinConfig() UpperCAmelCase : Tuple = swin_name.split("_" ) UpperCAmelCase : Tuple = name_split[1] UpperCAmelCase : Union[str, Any] = int(name_split[4] ) UpperCAmelCase : Any = int(name_split[3][-1] ) if model_size == "tiny": UpperCAmelCase : Optional[Any] = 96 UpperCAmelCase : Tuple = (2, 2, 6, 2) UpperCAmelCase : List[str] = (3, 6, 12, 24) elif model_size == "small": UpperCAmelCase : List[Any] = 96 UpperCAmelCase : Union[str, Any] = (2, 2, 18, 2) UpperCAmelCase : str = (3, 6, 12, 24) elif model_size == "base": UpperCAmelCase : str = 128 UpperCAmelCase : Optional[int] = (2, 2, 18, 2) UpperCAmelCase : Dict = (4, 8, 16, 32) else: UpperCAmelCase : Dict = 192 UpperCAmelCase : Dict = (2, 2, 18, 2) UpperCAmelCase : Optional[Any] = (6, 12, 24, 48) if "in22k" in swin_name: UpperCAmelCase : Any = 2_1841 else: UpperCAmelCase : Any = 1000 UpperCAmelCase : Optional[Any] = "huggingface/label-files" UpperCAmelCase : Union[str, Any] = "imagenet-1k-id2label.json" UpperCAmelCase : Dict = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="dataset" ) , "r" ) ) UpperCAmelCase : List[str] = {int(__magic_name__ ): v for k, v in idalabel.items()} UpperCAmelCase : int = idalabel UpperCAmelCase : int = {v: k for k, v in idalabel.items()} UpperCAmelCase : Optional[Any] = img_size UpperCAmelCase : Tuple = num_classes UpperCAmelCase : Any = embed_dim UpperCAmelCase : str = depths UpperCAmelCase : List[Any] = num_heads UpperCAmelCase : Union[str, Any] = window_size return config def lowercase ( __magic_name__ ): '''simple docstring''' if "patch_embed.proj" in name: UpperCAmelCase : List[str] = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: UpperCAmelCase : str = name.replace("patch_embed.norm" , "embeddings.norm" ) if "layers" in name: UpperCAmelCase : str = "encoder." + name if "attn.proj" in name: UpperCAmelCase : Optional[Any] = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: UpperCAmelCase : Any = name.replace("attn" , "attention.self" ) if "norm1" in name: UpperCAmelCase : Any = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: UpperCAmelCase : Tuple = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: UpperCAmelCase : int = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: UpperCAmelCase : Dict = name.replace("mlp.fc2" , "output.dense" ) if name == "norm.weight": UpperCAmelCase : Any = "layernorm.weight" if name == "norm.bias": UpperCAmelCase : List[Any] = "layernorm.bias" if "head" in name: UpperCAmelCase : List[Any] = name.replace("head" , "classifier" ) else: UpperCAmelCase : List[str] = "swin." + name return name def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' for key in orig_state_dict.copy().keys(): UpperCAmelCase : Optional[int] = orig_state_dict.pop(__magic_name__ ) if "mask" in key: continue elif "qkv" in key: UpperCAmelCase : Dict = key.split("." ) UpperCAmelCase : Optional[Any] = int(key_split[1] ) UpperCAmelCase : Dict = int(key_split[3] ) UpperCAmelCase : Tuple = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: UpperCAmelCase : Dict = val[:dim, :] UpperCAmelCase : str = val[ dim : dim * 2, : ] UpperCAmelCase : Tuple = val[-dim:, :] else: UpperCAmelCase : Optional[int] = val[ :dim ] UpperCAmelCase : List[Any] = val[ dim : dim * 2 ] UpperCAmelCase : Tuple = val[ -dim: ] else: UpperCAmelCase : str = val return orig_state_dict def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = timm.create_model(__magic_name__ , pretrained=__magic_name__ ) timm_model.eval() UpperCAmelCase : str = get_swin_config(__magic_name__ ) UpperCAmelCase : Any = SwinForImageClassification(__magic_name__ ) model.eval() UpperCAmelCase : Dict = convert_state_dict(timm_model.state_dict() , __magic_name__ ) model.load_state_dict(__magic_name__ ) UpperCAmelCase : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase : int = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_" , "-" ) ) ) UpperCAmelCase : Optional[int] = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw ) UpperCAmelCase : Optional[int] = image_processor(images=__magic_name__ , return_tensors="pt" ) UpperCAmelCase : List[Any] = timm_model(inputs["pixel_values"] ) UpperCAmelCase : Tuple = model(**__magic_name__ ).logits assert torch.allclose(__magic_name__ , __magic_name__ , atol=1e-3 ) print(F"Saving model {swin_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(__magic_name__ ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(__magic_name__ ) if __name__ == "__main__": a : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--swin_name", default="swin_tiny_patch4_window7_224", type=str, help="Name of the Swin timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) a : Optional[Any] = parser.parse_args() convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
311
'''simple docstring''' from jiwer import compute_measures import datasets a : List[Any] = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n" a : str = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n" a : Union[str, Any] = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase__ ( datasets.Metric ): """simple docstring""" def A_ ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[ "https://en.wikipedia.org/wiki/Word_error_rate", ] , ) def A_ ( self , snake_case=None , snake_case=None , snake_case=False ): '''simple docstring''' if concatenate_texts: return compute_measures(snake_case , snake_case )["wer"] else: UpperCAmelCase : Dict = 0 UpperCAmelCase : Optional[Any] = 0 for prediction, reference in zip(snake_case , snake_case ): UpperCAmelCase : Tuple = compute_measures(snake_case , snake_case ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
311
1
'''simple docstring''' import argparse import importlib from pathlib import Path # Test all the extensions added in the setup a : Tuple = [ "kernels/rwkv/wkv_cuda.cu", "kernels/rwkv/wkv_op.cpp", "kernels/deformable_detr/ms_deform_attn.h", "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh", "models/graphormer/algos_graphormer.pyx", ] def lowercase ( __magic_name__ ): '''simple docstring''' for file in FILES_TO_FIND: if not (transformers_path / file).exists(): return False return True if __name__ == "__main__": a : Dict = argparse.ArgumentParser() parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.") a : Optional[Any] = parser.parse_args() if args.check_lib: a : List[str] = importlib.import_module("transformers") a : Tuple = Path(transformers_module.__file__).parent else: a : Optional[Any] = Path.cwd() / "build/lib/transformers" if not test_custom_files_are_present(transformers_path): raise ValueError("The built release does not contain the custom files. Fix this before going further!")
311
'''simple docstring''' from functools import lru_cache def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = 2 UpperCAmelCase : str = set() while i * i <= n: if n % i: i += 1 else: n //= i factors.add(__magic_name__ ) if n > 1: factors.add(__magic_name__ ) return factors @lru_cache def lowercase ( __magic_name__ ): '''simple docstring''' return len(unique_prime_factors(__magic_name__ ) ) def lowercase ( __magic_name__ ): '''simple docstring''' return len(set(__magic_name__ ) ) in (0, 1) def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = 2 while True: # Increment each value of a generated range UpperCAmelCase : Any = [base + i for i in range(__magic_name__ )] # Run elements through out unique_prime_factors function # Append our target number to the end. UpperCAmelCase : Dict = [upf_len(__magic_name__ ) for x in group] checker.append(__magic_name__ ) # If all numbers in the list are equal, return the group variable. if equality(__magic_name__ ): return group # Increment our base variable by 1 base += 1 def lowercase ( __magic_name__ = 4 ): '''simple docstring''' UpperCAmelCase : int = run(__magic_name__ ) return results[0] if len(__magic_name__ ) else None if __name__ == "__main__": print(solution())
311
1
'''simple docstring''' import inspect import os import sys import unittest import accelerate from accelerate.test_utils import execute_subprocess_async, require_tpu class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = inspect.getfile(accelerate.test_utils ) UpperCAmelCase : Optional[int] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] ) UpperCAmelCase : Dict = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] ) @require_tpu def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = f"\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n ".split() UpperCAmelCase : str = [sys.executable] + distributed_args execute_subprocess_async(snake_case , env=os.environ.copy() )
311
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) a : Union[str, Any] = { "configuration_encodec": [ "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP", "EncodecConfig", ], "feature_extraction_encodec": ["EncodecFeatureExtractor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Optional[int] = [ "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST", "EncodecModel", "EncodecPreTrainedModel", ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys a : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
311
1
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = tempfile.mkdtemp() # fmt: off UpperCAmelCase : Any = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"] # fmt: on UpperCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) UpperCAmelCase : Any = { "do_resize": True, "size": {"height": 1_8, "width": 1_8}, "do_normalize": True, "image_mean": [0.5, 0.5, 0.5], "image_std": [0.5, 0.5, 0.5], } UpperCAmelCase : Dict = os.path.join(self.tmpdirname , snake_case ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(snake_case , snake_case ) def A_ ( self , **snake_case ): '''simple docstring''' return BertTokenizer.from_pretrained(self.tmpdirname , **snake_case ) def A_ ( self , **snake_case ): '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname , **snake_case ) def A_ ( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] UpperCAmelCase : Tuple = [Image.fromarray(np.moveaxis(snake_case , 0 , -1 ) ) for x in image_inputs] return image_inputs def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = self.get_tokenizer() UpperCAmelCase : Any = self.get_image_processor() UpperCAmelCase : List[str] = VisionTextDualEncoderProcessor(tokenizer=snake_case , image_processor=snake_case ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase : int = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase : List[Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) UpperCAmelCase : Optional[Any] = self.get_image_processor(do_normalize=snake_case , padding_value=1.0 ) UpperCAmelCase : Union[str, Any] = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=snake_case , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = self.get_image_processor() UpperCAmelCase : List[str] = self.get_tokenizer() UpperCAmelCase : Optional[int] = VisionTextDualEncoderProcessor(tokenizer=snake_case , image_processor=snake_case ) UpperCAmelCase : Any = self.prepare_image_inputs() UpperCAmelCase : int = image_processor(snake_case , return_tensors="np" ) UpperCAmelCase : Tuple = processor(images=snake_case , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = self.get_image_processor() UpperCAmelCase : Dict = self.get_tokenizer() UpperCAmelCase : Tuple = VisionTextDualEncoderProcessor(tokenizer=snake_case , image_processor=snake_case ) UpperCAmelCase : List[Any] = "lower newer" UpperCAmelCase : Union[str, Any] = processor(text=snake_case ) UpperCAmelCase : List[Any] = tokenizer(snake_case ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = self.get_image_processor() UpperCAmelCase : Dict = self.get_tokenizer() UpperCAmelCase : List[str] = VisionTextDualEncoderProcessor(tokenizer=snake_case , image_processor=snake_case ) UpperCAmelCase : Dict = "lower newer" UpperCAmelCase : str = self.prepare_image_inputs() UpperCAmelCase : Optional[Any] = processor(text=snake_case , images=snake_case ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with self.assertRaises(snake_case ): processor() def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = self.get_image_processor() UpperCAmelCase : str = self.get_tokenizer() UpperCAmelCase : Tuple = VisionTextDualEncoderProcessor(tokenizer=snake_case , image_processor=snake_case ) UpperCAmelCase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] UpperCAmelCase : Union[str, Any] = processor.batch_decode(snake_case ) UpperCAmelCase : Any = tokenizer.batch_decode(snake_case ) self.assertListEqual(snake_case , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = self.get_image_processor() UpperCAmelCase : Tuple = self.get_tokenizer() UpperCAmelCase : Optional[int] = VisionTextDualEncoderProcessor(tokenizer=snake_case , image_processor=snake_case ) UpperCAmelCase : Tuple = "lower newer" UpperCAmelCase : Tuple = self.prepare_image_inputs() UpperCAmelCase : Optional[Any] = processor(text=snake_case , images=snake_case ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
311
'''simple docstring''' # Lint as: python3 import itertools import os import re a : Tuple = re.compile(R"([A-Z]+)([A-Z][a-z])") a : Union[str, Any] = re.compile(R"([a-z\d])([A-Z])") a : str = re.compile(R"(?<!_)_(?!_)") a : List[Any] = re.compile(R"(_{2,})") a : List[Any] = R"^\w+(\.\w+)*$" a : Dict = R"<>:/\|?*" def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = _uppercase_uppercase_re.sub(R"\1_\2" , __magic_name__ ) UpperCAmelCase : List[str] = _lowercase_uppercase_re.sub(R"\1_\2" , __magic_name__ ) return name.lower() def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Any = _single_underscore_re.split(__magic_name__ ) UpperCAmelCase : Union[str, Any] = [_multiple_underscores_re.split(__magic_name__ ) for n in name] return "".join(n.capitalize() for n in itertools.chain.from_iterable(__magic_name__ ) if n != "" ) def lowercase ( __magic_name__ ): '''simple docstring''' if os.path.basename(__magic_name__ ) != name: raise ValueError(F"Should be a dataset name, not a path: {name}" ) return camelcase_to_snakecase(__magic_name__ ) def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' if os.path.basename(__magic_name__ ) != name: raise ValueError(F"Should be a dataset name, not a path: {name}" ) if not re.match(_split_re , __magic_name__ ): raise ValueError(F"Split name should match '{_split_re}'' but got '{split}'." ) return F"{filename_prefix_for_name(__magic_name__ )}-{split}" def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None ): '''simple docstring''' UpperCAmelCase : List[str] = filename_prefix_for_split(__magic_name__ , __magic_name__ ) if filetype_suffix: prefix += F".{filetype_suffix}" UpperCAmelCase : int = os.path.join(__magic_name__ , __magic_name__ ) return F"{filepath}*" def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None ): '''simple docstring''' UpperCAmelCase : List[str] = filename_prefix_for_split(__magic_name__ , __magic_name__ ) UpperCAmelCase : int = os.path.join(__magic_name__ , __magic_name__ ) if shard_lengths: UpperCAmelCase : Tuple = len(__magic_name__ ) UpperCAmelCase : Optional[int] = [F"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(__magic_name__ )] if filetype_suffix: UpperCAmelCase : Optional[int] = [filename + F".{filetype_suffix}" for filename in filenames] return filenames else: UpperCAmelCase : int = prefix if filetype_suffix: filename += F".{filetype_suffix}" return [filename]
311
1
'''simple docstring''' a : Dict = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n" a : List[str] = [{"type": "code", "content": INSTALL_CONTENT}] a : int = { "{processor_class}": "FakeProcessorClass", "{model_class}": "FakeModelClass", "{object_class}": "FakeObjectClass", }
311
'''simple docstring''' from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) a : Optional[int] = _symbol_database.Default() a : Any = _descriptor_pool.Default().AddSerializedFile( B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03" ) a : Tuple = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals) if _descriptor._USE_C_DESCRIPTORS is False: a : str = None a : Optional[Any] = B"H\003" # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" a : str = 45 a : Any = 15_81 a : List[Any] = 15_17 a : Union[str, Any] = 15_70 a : Optional[Any] = 15_84 a : List[str] = 17_93 a : Optional[Any] = 17_95 a : Tuple = 19_16 a : Optional[Any] = 18_64 a : int = 19_05 a : Optional[Any] = 19_19 a : Union[str, Any] = 24_29 a : List[Any] = 22_08 a : Dict = 24_18 a : Optional[int] = 23_23 a : str = 24_07 # @@protoc_insertion_point(module_scope)
311
1
'''simple docstring''' import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase__ ( lowercase__ ): """simple docstring""" def __init__( self , snake_case , snake_case=1_3 , snake_case=7 , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=False , snake_case=False , snake_case=False , snake_case=2 , snake_case=9_9 , snake_case=0 , snake_case=3_2 , snake_case=5 , snake_case=4 , snake_case=0.1 , snake_case=0.1 , snake_case=5_1_2 , snake_case=1_2 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case="last" , snake_case=None , snake_case=None , ): '''simple docstring''' UpperCAmelCase : str = parent UpperCAmelCase : Optional[int] = batch_size UpperCAmelCase : Any = seq_length UpperCAmelCase : Tuple = is_training UpperCAmelCase : Optional[int] = use_input_lengths UpperCAmelCase : Optional[int] = use_token_type_ids UpperCAmelCase : Union[str, Any] = use_labels UpperCAmelCase : Tuple = gelu_activation UpperCAmelCase : Optional[int] = sinusoidal_embeddings UpperCAmelCase : Union[str, Any] = causal UpperCAmelCase : int = asm UpperCAmelCase : Optional[int] = n_langs UpperCAmelCase : Optional[int] = vocab_size UpperCAmelCase : List[Any] = n_special UpperCAmelCase : Optional[int] = hidden_size UpperCAmelCase : Dict = num_hidden_layers UpperCAmelCase : List[Any] = num_attention_heads UpperCAmelCase : Optional[int] = hidden_dropout_prob UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob UpperCAmelCase : List[Any] = max_position_embeddings UpperCAmelCase : Dict = type_vocab_size UpperCAmelCase : Any = type_sequence_label_size UpperCAmelCase : Optional[int] = initializer_range UpperCAmelCase : Union[str, Any] = num_labels UpperCAmelCase : Any = num_choices UpperCAmelCase : str = summary_type UpperCAmelCase : List[str] = use_proj UpperCAmelCase : List[Any] = scope def A_ ( self ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase : List[Any] = None if self.use_input_lengths: UpperCAmelCase : str = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length UpperCAmelCase : Any = None if self.use_token_type_ids: UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) UpperCAmelCase : List[Any] = None UpperCAmelCase : str = None UpperCAmelCase : Tuple = None if self.use_labels: UpperCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , 2 ).float() UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase : Any = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def A_ ( self ): '''simple docstring''' return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ): '''simple docstring''' UpperCAmelCase : Optional[int] = FlaubertModel(config=snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : List[str] = model(snake_case , lengths=snake_case , langs=snake_case ) UpperCAmelCase : List[str] = model(snake_case , langs=snake_case ) UpperCAmelCase : List[Any] = model(snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ): '''simple docstring''' UpperCAmelCase : List[str] = FlaubertWithLMHeadModel(snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : List[Any] = model(snake_case , token_type_ids=snake_case , labels=snake_case ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ): '''simple docstring''' UpperCAmelCase : int = FlaubertForQuestionAnsweringSimple(snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : str = model(snake_case ) UpperCAmelCase : List[str] = model(snake_case , start_positions=snake_case , end_positions=snake_case ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ): '''simple docstring''' UpperCAmelCase : Optional[Any] = FlaubertForQuestionAnswering(snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : int = model(snake_case ) UpperCAmelCase : Any = model( snake_case , start_positions=snake_case , end_positions=snake_case , cls_index=snake_case , is_impossible=snake_case , p_mask=snake_case , ) UpperCAmelCase : List[str] = model( snake_case , start_positions=snake_case , end_positions=snake_case , cls_index=snake_case , is_impossible=snake_case , ) ((UpperCAmelCase) , ) : List[Any] = result_with_labels.to_tuple() UpperCAmelCase : Union[str, Any] = model(snake_case , start_positions=snake_case , end_positions=snake_case ) ((UpperCAmelCase) , ) : Any = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ): '''simple docstring''' UpperCAmelCase : Dict = FlaubertForSequenceClassification(snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : List[str] = model(snake_case ) UpperCAmelCase : Optional[Any] = model(snake_case , labels=snake_case ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ): '''simple docstring''' UpperCAmelCase : str = self.num_labels UpperCAmelCase : Optional[int] = FlaubertForTokenClassification(snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : Tuple = model(snake_case , attention_mask=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ): '''simple docstring''' UpperCAmelCase : Optional[Any] = self.num_choices UpperCAmelCase : Union[str, Any] = FlaubertForMultipleChoice(config=snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase : Union[str, Any] = model( snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs() ( ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ) : List[str] = config_and_inputs UpperCAmelCase : Any = { "input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths, "attention_mask": input_mask, } return config, inputs_dict @require_torch class UpperCamelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ : List[str] = ( { "feature-extraction": FlaubertModel, "fill-mask": FlaubertWithLMHeadModel, "question-answering": FlaubertForQuestionAnsweringSimple, "text-classification": FlaubertForSequenceClassification, "token-classification": FlaubertForTokenClassification, "zero-shot": FlaubertForSequenceClassification, } if is_torch_available() else {} ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case ): '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def A_ ( self , snake_case , snake_case , snake_case=False ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = super()._prepare_for_class(snake_case , snake_case , return_labels=snake_case ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": UpperCAmelCase : List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case ) UpperCAmelCase : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case ) return inputs_dict def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = FlaubertModelTester(self ) UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=snake_case , emb_dim=3_7 ) def A_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*snake_case ) @slow def A_ ( self ): '''simple docstring''' for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : Tuple = FlaubertModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) @slow @require_torch_gpu def A_ ( self ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. if model_class == FlaubertForMultipleChoice: return UpperCAmelCase : Dict = True UpperCAmelCase : Optional[int] = model_class(config=snake_case ) UpperCAmelCase : List[str] = self._prepare_for_class(snake_case , snake_case ) UpperCAmelCase : List[str] = torch.jit.trace( snake_case , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(snake_case , os.path.join(snake_case , "traced_model.pt" ) ) UpperCAmelCase : Union[str, Any] = torch.jit.load(os.path.join(snake_case , "traced_model.pt" ) , map_location=snake_case ) loaded(inputs_dict["input_ids"].to(snake_case ) , inputs_dict["attention_mask"].to(snake_case ) ) @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" ) UpperCAmelCase : Dict = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) with torch.no_grad(): UpperCAmelCase : Any = model(snake_case )[0] UpperCAmelCase : List[Any] = torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , snake_case ) UpperCAmelCase : Dict = torch.tensor( [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) )
311
'''simple docstring''' import argparse import copy def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[str] = {} with open(__magic_name__ ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: UpperCAmelCase : List[Any] = [] _list.append([line.split()[1], line.split()[2]] ) UpperCAmelCase : Tuple = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: UpperCAmelCase : Any = [] _list.append([line.split()[0], line.split()[2]] ) UpperCAmelCase : int = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' with open(__magic_name__ ) as f: UpperCAmelCase : List[str] = f.read(1 ) UpperCAmelCase : List[Any] = start_node UpperCAmelCase : Union[str, Any] = [] UpperCAmelCase : Any = start_node UpperCAmelCase : Optional[Any] = 0 while visiting not in first_solution: UpperCAmelCase : Optional[Any] = 1_0000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(__magic_name__ ) and k[0] not in first_solution: UpperCAmelCase : Tuple = k[1] UpperCAmelCase : Dict = k[0] first_solution.append(__magic_name__ ) UpperCAmelCase : int = distance_of_first_solution + int(__magic_name__ ) UpperCAmelCase : str = best_node first_solution.append(__magic_name__ ) UpperCAmelCase : int = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 UpperCAmelCase : str = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 1_0000 ) return first_solution, distance_of_first_solution def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Optional[Any] = [] for n in solution[1:-1]: UpperCAmelCase : Any = solution.index(__magic_name__ ) for kn in solution[1:-1]: UpperCAmelCase : Dict = solution.index(__magic_name__ ) if n == kn: continue UpperCAmelCase : Tuple = copy.deepcopy(__magic_name__ ) UpperCAmelCase : Optional[int] = kn UpperCAmelCase : List[str] = n UpperCAmelCase : str = 0 for k in _tmp[:-1]: UpperCAmelCase : List[Any] = _tmp[_tmp.index(__magic_name__ ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: UpperCAmelCase : List[Any] = distance + int(i[1] ) _tmp.append(__magic_name__ ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) UpperCAmelCase : List[str] = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda __magic_name__ : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[Any] = 1 UpperCAmelCase : List[str] = first_solution UpperCAmelCase : str = [] UpperCAmelCase : Union[str, Any] = distance_of_first_solution UpperCAmelCase : Union[str, Any] = solution while count <= iters: UpperCAmelCase : int = find_neighborhood(__magic_name__ , __magic_name__ ) UpperCAmelCase : Any = 0 UpperCAmelCase : List[str] = neighborhood[index_of_best_solution] UpperCAmelCase : Dict = len(__magic_name__ ) - 1 UpperCAmelCase : Dict = False while not found: UpperCAmelCase : List[Any] = 0 while i < len(__magic_name__ ): if best_solution[i] != solution[i]: UpperCAmelCase : int = best_solution[i] UpperCAmelCase : Optional[int] = solution[i] break UpperCAmelCase : List[str] = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) UpperCAmelCase : List[str] = True UpperCAmelCase : List[Any] = best_solution[:-1] UpperCAmelCase : str = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: UpperCAmelCase : Union[str, Any] = cost UpperCAmelCase : Tuple = solution else: UpperCAmelCase : Optional[Any] = index_of_best_solution + 1 UpperCAmelCase : str = neighborhood[index_of_best_solution] if len(__magic_name__ ) >= size: tabu_list.pop(0 ) UpperCAmelCase : int = count + 1 return best_solution_ever, best_cost def lowercase ( __magic_name__=None ): '''simple docstring''' UpperCAmelCase : Dict = generate_neighbours(args.File ) UpperCAmelCase , UpperCAmelCase : Any = generate_first_solution( args.File , __magic_name__ ) UpperCAmelCase , UpperCAmelCase : Any = tabu_search( __magic_name__ , __magic_name__ , __magic_name__ , args.Iterations , args.Size , ) print(F"Best solution: {best_sol}, with total distance: {best_cost}." ) if __name__ == "__main__": a : Union[str, Any] = argparse.ArgumentParser(description="Tabu Search") parser.add_argument( "-f", "--File", type=str, help="Path to the file containing the data", required=True, ) parser.add_argument( "-i", "--Iterations", type=int, help="How many iterations the algorithm should perform", required=True, ) parser.add_argument( "-s", "--Size", type=int, help="Size of the tabu list", required=True ) # Pass the arguments to main method main(parser.parse_args())
311
1
'''simple docstring''' import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class UpperCamelCase__ ( lowercase__ ): """simple docstring""" def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = tempfile.mkdtemp() UpperCAmelCase : List[Any] = 8 # DPR tok UpperCAmelCase : Union[str, Any] = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] UpperCAmelCase : Dict = os.path.join(self.tmpdirname , "dpr_tokenizer" ) os.makedirs(snake_case , exist_ok=snake_case ) UpperCAmelCase : Optional[int] = os.path.join(snake_case , DPR_VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) # BART tok UpperCAmelCase : int = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] UpperCAmelCase : Any = dict(zip(snake_case , range(len(snake_case ) ) ) ) UpperCAmelCase : Optional[int] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] UpperCAmelCase : Dict = {"unk_token": "<unk>"} UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , "bart_tokenizer" ) os.makedirs(snake_case , exist_ok=snake_case ) UpperCAmelCase : Optional[Any] = os.path.join(snake_case , BART_VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase : int = os.path.join(snake_case , BART_VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(snake_case ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(snake_case ) ) def A_ ( self ): '''simple docstring''' return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) ) def A_ ( self ): '''simple docstring''' return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) ) def A_ ( self ): '''simple docstring''' return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) ) def A_ ( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[Any] = self.get_dummy_dataset() UpperCAmelCase : Optional[Any] = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset: UpperCAmelCase : Dict = dataset UpperCAmelCase : int = RagRetriever( snake_case , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : List[str] = self.get_dummy_dataset() UpperCAmelCase : Any = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , ) if from_disk: UpperCAmelCase : Dict = os.path.join(self.tmpdirname , "dataset" ) UpperCAmelCase : List[Any] = os.path.join(self.tmpdirname , "index.faiss" ) dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) ) dataset.drop_index("embeddings" ) dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) ) del dataset UpperCAmelCase : int = RagRetriever( snake_case , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: UpperCAmelCase : str = RagRetriever( snake_case , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , snake_case ) , ) return retriever def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[int] = Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT ) UpperCAmelCase : int = os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" ) dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" ) pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) ) UpperCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" ) UpperCAmelCase : str = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset} pickle.dump(snake_case , open(snake_case , "wb" ) ) UpperCAmelCase : Any = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , ) UpperCAmelCase : Dict = RagRetriever( snake_case , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = 1 UpperCAmelCase : int = self.get_dummy_canonical_hf_index_retriever() UpperCAmelCase : Optional[int] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = retriever.retrieve(snake_case , n_docs=snake_case ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset: UpperCAmelCase : Tuple = self.get_dummy_dataset() retriever.save_pretrained(snake_case ) UpperCAmelCase : Any = RagRetriever.from_pretrained(snake_case ) self.assertIsInstance(snake_case , snake_case ) UpperCAmelCase : Tuple = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase : Union[str, Any] = retriever.retrieve(snake_case , n_docs=1 ) self.assertTrue(out is not None ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[str] = 1 UpperCAmelCase : Tuple = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case ) UpperCAmelCase : Optional[int] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = retriever.retrieve(snake_case , n_docs=snake_case ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(snake_case ) UpperCAmelCase : Dict = RagRetriever.from_pretrained(snake_case ) self.assertIsInstance(snake_case , snake_case ) UpperCAmelCase : Optional[int] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase : Any = retriever.retrieve(snake_case , n_docs=1 ) self.assertTrue(out is not None ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = 1 UpperCAmelCase : Union[str, Any] = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case ) UpperCAmelCase : List[str] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = retriever.retrieve(snake_case , n_docs=snake_case ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(snake_case ) UpperCAmelCase : Union[str, Any] = RagRetriever.from_pretrained(snake_case ) self.assertIsInstance(snake_case , snake_case ) UpperCAmelCase : Any = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase : List[str] = retriever.retrieve(snake_case , n_docs=1 ) self.assertTrue(out is not None ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = 1 UpperCAmelCase : Union[str, Any] = self.get_dummy_legacy_index_retriever() UpperCAmelCase : List[Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = retriever.retrieve(snake_case , n_docs=snake_case ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] ) self.assertEqual(len(doc_dicts[0]["text"] ) , snake_case ) self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(snake_case ) UpperCAmelCase : Tuple = RagRetriever.from_pretrained(snake_case ) self.assertIsInstance(snake_case , snake_case ) UpperCAmelCase : Optional[int] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase : Dict = retriever.retrieve(snake_case , n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def A_ ( self ): '''simple docstring''' import torch UpperCAmelCase : Any = 1 UpperCAmelCase : Any = self.get_dummy_canonical_hf_index_retriever() UpperCAmelCase : List[Any] = [[5, 7], [1_0, 1_1]] UpperCAmelCase : Dict = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase : Optional[Any] = retriever(snake_case , snake_case , prefix=retriever.config.generator.prefix , n_docs=snake_case ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = ( out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(snake_case , snake_case ) self.assertIsInstance(snake_case , snake_case ) self.assertIsInstance(snake_case , np.ndarray ) UpperCAmelCase : List[Any] = retriever( snake_case , snake_case , prefix=retriever.config.generator.prefix , n_docs=snake_case , return_tensors="pt" , ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = ( # noqa: F841 out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], out["doc_ids"], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(snake_case , torch.Tensor ) self.assertIsInstance(snake_case , torch.Tensor ) self.assertIsInstance(snake_case , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[Any] = self.get_dpr_ctx_encoder_tokenizer() UpperCAmelCase : int = 1 UpperCAmelCase : Dict = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case ) retriever.set_ctx_encoder_tokenizer(snake_case ) UpperCAmelCase : Dict = [[5, 7], [1_0, 1_1]] UpperCAmelCase : Union[str, Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase : List[str] = retriever(snake_case , snake_case , prefix=retriever.config.generator.prefix , n_docs=snake_case ) self.assertEqual( len(snake_case ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , snake_case ) # check for doc token related keys in dictionary.
311
'''simple docstring''' from collections.abc import Generator from math import sin def lowercase ( __magic_name__ ): '''simple docstring''' if len(__magic_name__ ) != 32: raise ValueError("Input must be of length 32" ) UpperCAmelCase : Union[str, Any] = b"" for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def lowercase ( __magic_name__ ): '''simple docstring''' if i < 0: raise ValueError("Input must be non-negative" ) UpperCAmelCase : Dict = format(__magic_name__ , "08x" )[-8:] UpperCAmelCase : List[str] = b"" for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" ) return little_endian_hex def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : int = b"" for char in message: bit_string += format(__magic_name__ , "08b" ).encode("utf-8" ) UpperCAmelCase : List[Any] = format(len(__magic_name__ ) , "064b" ).encode("utf-8" ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(__magic_name__ ) % 512 != 448: bit_string += b"0" bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] ) return bit_string def lowercase ( __magic_name__ ): '''simple docstring''' if len(__magic_name__ ) % 512 != 0: raise ValueError("Input must have length that's a multiple of 512" ) for pos in range(0 , len(__magic_name__ ) , 512 ): UpperCAmelCase : Union[str, Any] = bit_string[pos : pos + 512] UpperCAmelCase : Tuple = [] for i in range(0 , 512 , 32 ): block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) ) yield block_words def lowercase ( __magic_name__ ): '''simple docstring''' if i < 0: raise ValueError("Input must be non-negative" ) UpperCAmelCase : Any = format(__magic_name__ , "032b" ) UpperCAmelCase : int = "" for c in i_str: new_str += "1" if c == "0" else "0" return int(__magic_name__ , 2 ) def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' return (a + b) % 2**32 def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' if i < 0: raise ValueError("Input must be non-negative" ) if shift < 0: raise ValueError("Shift must be non-negative" ) return ((i << shift) ^ (i >> (32 - shift))) % 2**32 def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = preprocess(__magic_name__ ) UpperCAmelCase : List[Any] = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )] # Starting states UpperCAmelCase : List[str] = 0X67452301 UpperCAmelCase : Tuple = 0XEFCDAB89 UpperCAmelCase : List[Any] = 0X98BADCFE UpperCAmelCase : List[str] = 0X10325476 UpperCAmelCase : Dict = [ 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(__magic_name__ ): UpperCAmelCase : Optional[Any] = aa UpperCAmelCase : List[Any] = ba UpperCAmelCase : Optional[Any] = ca UpperCAmelCase : Any = da # Hash current chunk for i in range(64 ): if i <= 15: # f = (b & c) | (not_32(b) & d) # Alternate definition for f UpperCAmelCase : Tuple = d ^ (b & (c ^ d)) UpperCAmelCase : List[str] = i elif i <= 31: # f = (d & b) | (not_32(d) & c) # Alternate definition for f UpperCAmelCase : int = c ^ (d & (b ^ c)) UpperCAmelCase : Tuple = (5 * i + 1) % 16 elif i <= 47: UpperCAmelCase : Any = b ^ c ^ d UpperCAmelCase : Union[str, Any] = (3 * i + 5) % 16 else: UpperCAmelCase : Dict = c ^ (b | not_aa(__magic_name__ )) UpperCAmelCase : Dict = (7 * i) % 16 UpperCAmelCase : List[str] = (f + a + added_consts[i] + block_words[g]) % 2**32 UpperCAmelCase : List[Any] = d UpperCAmelCase : Any = c UpperCAmelCase : Dict = b UpperCAmelCase : Union[str, Any] = sum_aa(__magic_name__ , left_rotate_aa(__magic_name__ , shift_amounts[i] ) ) # Add hashed chunk to running total UpperCAmelCase : List[str] = sum_aa(__magic_name__ , __magic_name__ ) UpperCAmelCase : Any = sum_aa(__magic_name__ , __magic_name__ ) UpperCAmelCase : List[Any] = sum_aa(__magic_name__ , __magic_name__ ) UpperCAmelCase : Optional[int] = sum_aa(__magic_name__ , __magic_name__ ) UpperCAmelCase : List[str] = reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ ) return digest if __name__ == "__main__": import doctest doctest.testmod()
311
1
'''simple docstring''' from .imports import is_tqdm_available if is_tqdm_available(): from tqdm.auto import tqdm as _tqdm from ..state import PartialState def lowercase ( __magic_name__ = True , *__magic_name__ , **__magic_name__ ): '''simple docstring''' if not is_tqdm_available(): raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`." ) UpperCAmelCase : List[str] = False if main_process_only: UpperCAmelCase : str = PartialState().local_process_index == 0 return _tqdm(*__magic_name__ , **__magic_name__ , disable=__magic_name__ )
311
'''simple docstring''' a : List[str] = "0.21.0" from .accelerator import Accelerator from .big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from .data_loader import skip_first_batches from .launchers import debug_launcher, notebook_launcher from .state import PartialState from .utils import ( DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states, ) if is_rich_available(): from .utils import rich
311
1
'''simple docstring''' from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) a : Optional[int] = _symbol_database.Default() a : Any = _descriptor_pool.Default().AddSerializedFile( B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03" ) a : Tuple = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals) if _descriptor._USE_C_DESCRIPTORS is False: a : str = None a : Optional[Any] = B"H\003" # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" a : str = 45 a : Any = 15_81 a : List[Any] = 15_17 a : Union[str, Any] = 15_70 a : Optional[Any] = 15_84 a : List[str] = 17_93 a : Optional[Any] = 17_95 a : Tuple = 19_16 a : Optional[Any] = 18_64 a : int = 19_05 a : Optional[Any] = 19_19 a : Union[str, Any] = 24_29 a : List[Any] = 22_08 a : Dict = 24_18 a : Optional[int] = 23_23 a : str = 24_07 # @@protoc_insertion_point(module_scope)
311
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() a : Dict = logging.get_logger(__name__) def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[str] = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: UpperCAmelCase : Tuple = 192 UpperCAmelCase : str = 768 UpperCAmelCase : List[Any] = 12 UpperCAmelCase : List[Any] = 3 UpperCAmelCase : List[Any] = [800, 1333] UpperCAmelCase : List[str] = False elif yolos_name == "yolos_s_dWr": UpperCAmelCase : Union[str, Any] = 330 UpperCAmelCase : Union[str, Any] = 14 UpperCAmelCase : Any = 6 UpperCAmelCase : int = 1320 elif "yolos_s" in yolos_name: UpperCAmelCase : Union[str, Any] = 384 UpperCAmelCase : Dict = 1536 UpperCAmelCase : str = 12 UpperCAmelCase : List[str] = 6 elif "yolos_b" in yolos_name: UpperCAmelCase : int = [800, 1344] UpperCAmelCase : Optional[int] = 91 UpperCAmelCase : int = "huggingface/label-files" UpperCAmelCase : Union[str, Any] = "coco-detection-id2label.json" UpperCAmelCase : Optional[Any] = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="dataset" ) , "r" ) ) UpperCAmelCase : str = {int(__magic_name__ ): v for k, v in idalabel.items()} UpperCAmelCase : str = idalabel UpperCAmelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} return config def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ = False ): '''simple docstring''' for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCAmelCase : Tuple = state_dict.pop(F"blocks.{i}.attn.qkv.weight" ) UpperCAmelCase : List[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase : str = in_proj_weight[: config.hidden_size, :] UpperCAmelCase : Optional[int] = in_proj_bias[: config.hidden_size] UpperCAmelCase : Optional[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCAmelCase : int = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCAmelCase : str = in_proj_weight[-config.hidden_size :, :] UpperCAmelCase : Tuple = in_proj_bias[-config.hidden_size :] def lowercase ( __magic_name__ ): '''simple docstring''' if "backbone" in name: UpperCAmelCase : int = name.replace("backbone" , "vit" ) if "cls_token" in name: UpperCAmelCase : Dict = name.replace("cls_token" , "embeddings.cls_token" ) if "det_token" in name: UpperCAmelCase : int = name.replace("det_token" , "embeddings.detection_tokens" ) if "mid_pos_embed" in name: UpperCAmelCase : Tuple = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" ) if "pos_embed" in name: UpperCAmelCase : int = name.replace("pos_embed" , "embeddings.position_embeddings" ) if "patch_embed.proj" in name: UpperCAmelCase : str = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "blocks" in name: UpperCAmelCase : Tuple = name.replace("blocks" , "encoder.layer" ) if "attn.proj" in name: UpperCAmelCase : Tuple = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: UpperCAmelCase : Any = name.replace("attn" , "attention.self" ) if "norm1" in name: UpperCAmelCase : int = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: UpperCAmelCase : List[str] = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: UpperCAmelCase : List[str] = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: UpperCAmelCase : Dict = name.replace("mlp.fc2" , "output.dense" ) if "class_embed" in name: UpperCAmelCase : Any = name.replace("class_embed" , "class_labels_classifier" ) if "bbox_embed" in name: UpperCAmelCase : Optional[int] = name.replace("bbox_embed" , "bbox_predictor" ) if "vit.norm" in name: UpperCAmelCase : Tuple = name.replace("vit.norm" , "vit.layernorm" ) return name def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' for key in orig_state_dict.copy().keys(): UpperCAmelCase : Optional[int] = orig_state_dict.pop(__magic_name__ ) if "qkv" in key: UpperCAmelCase : str = key.split("." ) UpperCAmelCase : List[Any] = int(key_split[2] ) UpperCAmelCase : int = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: UpperCAmelCase : Optional[int] = val[:dim, :] UpperCAmelCase : Union[str, Any] = val[ dim : dim * 2, : ] UpperCAmelCase : Any = val[-dim:, :] else: UpperCAmelCase : Tuple = val[:dim] UpperCAmelCase : List[str] = val[dim : dim * 2] UpperCAmelCase : Any = val[-dim:] else: UpperCAmelCase : Union[str, Any] = val return orig_state_dict def lowercase ( ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase : Tuple = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw ) return im @torch.no_grad() def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = False ): '''simple docstring''' UpperCAmelCase : Tuple = get_yolos_config(__magic_name__ ) # load original state_dict UpperCAmelCase : int = torch.load(__magic_name__ , map_location="cpu" )["model"] # load 🤗 model UpperCAmelCase : int = YolosForObjectDetection(__magic_name__ ) model.eval() UpperCAmelCase : Dict = convert_state_dict(__magic_name__ , __magic_name__ ) model.load_state_dict(__magic_name__ ) # Check outputs on an image, prepared by YolosImageProcessor UpperCAmelCase : Dict = 800 if yolos_name != "yolos_ti" else 512 UpperCAmelCase : int = YolosImageProcessor(format="coco_detection" , size=__magic_name__ ) UpperCAmelCase : List[Any] = image_processor(images=prepare_img() , return_tensors="pt" ) UpperCAmelCase : List[str] = model(**__magic_name__ ) UpperCAmelCase , UpperCAmelCase : Optional[int] = outputs.logits, outputs.pred_boxes UpperCAmelCase , UpperCAmelCase : Optional[Any] = None, None if yolos_name == "yolos_ti": UpperCAmelCase : str = torch.tensor( [[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] ) UpperCAmelCase : Tuple = torch.tensor( [[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] ) elif yolos_name == "yolos_s_200_pre": UpperCAmelCase : Union[str, Any] = torch.tensor( [[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] ) UpperCAmelCase : List[str] = torch.tensor( [[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] ) elif yolos_name == "yolos_s_300_pre": UpperCAmelCase : List[str] = torch.tensor( [[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] ) UpperCAmelCase : Dict = torch.tensor( [[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] ) elif yolos_name == "yolos_s_dWr": UpperCAmelCase : Dict = torch.tensor( [[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] ) UpperCAmelCase : List[Any] = torch.tensor( [[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] ) elif yolos_name == "yolos_base": UpperCAmelCase : str = torch.tensor( [[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] ) UpperCAmelCase : Union[str, Any] = torch.tensor( [[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] ) else: raise ValueError(F"Unknown yolos_name: {yolos_name}" ) assert torch.allclose(logits[0, :3, :3] , __magic_name__ , atol=1e-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , __magic_name__ , atol=1e-4 ) Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) print(F"Saving model {yolos_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(__magic_name__ ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(__magic_name__ ) if push_to_hub: UpperCAmelCase : int = { "yolos_ti": "yolos-tiny", "yolos_s_200_pre": "yolos-small", "yolos_s_300_pre": "yolos-small-300", "yolos_s_dWr": "yolos-small-dwr", "yolos_base": "yolos-base", } print("Pushing to the hub..." ) UpperCAmelCase : Tuple = model_mapping[yolos_name] image_processor.push_to_hub(__magic_name__ , organization="hustvl" ) model.push_to_hub(__magic_name__ , organization="hustvl" ) if __name__ == "__main__": a : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--yolos_name", default="yolos_s_200_pre", type=str, help=( "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre'," " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'." ), ) parser.add_argument( "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) a : str = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
311
1
'''simple docstring''' a : Optional[int] = 0 # The first color of the flag. a : Union[str, Any] = 1 # The second color of the flag. a : Any = 2 # The third color of the flag. a : List[str] = (red, white, blue) def lowercase ( __magic_name__ ): '''simple docstring''' if not sequence: return [] if len(__magic_name__ ) == 1: return list(__magic_name__ ) UpperCAmelCase : Optional[Any] = 0 UpperCAmelCase : List[str] = len(__magic_name__ ) - 1 UpperCAmelCase : Optional[Any] = 0 while mid <= high: if sequence[mid] == colors[0]: UpperCAmelCase , UpperCAmelCase : str = sequence[mid], sequence[low] low += 1 mid += 1 elif sequence[mid] == colors[1]: mid += 1 elif sequence[mid] == colors[2]: UpperCAmelCase , UpperCAmelCase : Dict = sequence[high], sequence[mid] high -= 1 else: UpperCAmelCase : Optional[Any] = F"The elements inside the sequence must contains only {colors} values" raise ValueError(__magic_name__ ) return sequence if __name__ == "__main__": import doctest doctest.testmod() a : Optional[int] = input("Enter numbers separated by commas:\n").strip() a : Tuple = [int(item.strip()) for item in user_input.split(",")] print(F'{dutch_national_flag_sort(unsorted)}')
311
'''simple docstring''' import argparse import logging import pickle import random import time import numpy as np from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO ) a : Tuple = logging.getLogger(__name__) def lowercase ( ): '''simple docstring''' UpperCAmelCase : Any = argparse.ArgumentParser( description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." ) parser.add_argument("--file_path" , type=__magic_name__ , default="data/dump.txt" , help="The path to the data." ) parser.add_argument("--tokenizer_type" , type=__magic_name__ , default="bert" , choices=["bert", "roberta", "gpt2"] ) parser.add_argument("--tokenizer_name" , type=__magic_name__ , default="bert-base-uncased" , help="The tokenizer to use." ) parser.add_argument("--dump_file" , type=__magic_name__ , default="data/dump" , help="The dump file prefix." ) UpperCAmelCase : List[Any] = parser.parse_args() logger.info(F"Loading Tokenizer ({args.tokenizer_name})" ) if args.tokenizer_type == "bert": UpperCAmelCase : Any = BertTokenizer.from_pretrained(args.tokenizer_name ) UpperCAmelCase : Optional[int] = tokenizer.special_tokens_map["cls_token"] # `[CLS]` UpperCAmelCase : Any = tokenizer.special_tokens_map["sep_token"] # `[SEP]` elif args.tokenizer_type == "roberta": UpperCAmelCase : List[Any] = RobertaTokenizer.from_pretrained(args.tokenizer_name ) UpperCAmelCase : Tuple = tokenizer.special_tokens_map["cls_token"] # `<s>` UpperCAmelCase : Optional[int] = tokenizer.special_tokens_map["sep_token"] # `</s>` elif args.tokenizer_type == "gpt2": UpperCAmelCase : List[str] = GPTaTokenizer.from_pretrained(args.tokenizer_name ) UpperCAmelCase : Optional[Any] = tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>` UpperCAmelCase : List[Any] = tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>` logger.info(F"Loading text from {args.file_path}" ) with open(args.file_path , "r" , encoding="utf8" ) as fp: UpperCAmelCase : str = fp.readlines() logger.info("Start encoding" ) logger.info(F"{len(__magic_name__ )} examples to process." ) UpperCAmelCase : int = [] UpperCAmelCase : int = 0 UpperCAmelCase : Union[str, Any] = 1_0000 UpperCAmelCase : Union[str, Any] = time.time() for text in data: UpperCAmelCase : Dict = F"{bos} {text.strip()} {sep}" UpperCAmelCase : Tuple = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) rslt.append(__magic_name__ ) iter += 1 if iter % interval == 0: UpperCAmelCase : Dict = time.time() logger.info(F"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" ) UpperCAmelCase : Any = time.time() logger.info("Finished binarization" ) logger.info(F"{len(__magic_name__ )} examples processed." ) UpperCAmelCase : str = F"{args.dump_file}.{args.tokenizer_name}.pickle" UpperCAmelCase : List[str] = tokenizer.vocab_size if vocab_size < (1 << 16): UpperCAmelCase : int = [np.uintaa(__magic_name__ ) for d in rslt] else: UpperCAmelCase : int = [np.intaa(__magic_name__ ) for d in rslt] random.shuffle(rslt_ ) logger.info(F"Dump to {dp_file}" ) with open(__magic_name__ , "wb" ) as handle: pickle.dump(rslt_ , __magic_name__ , protocol=pickle.HIGHEST_PROTOCOL ) if __name__ == "__main__": main()
311
1
'''simple docstring''' from __future__ import annotations def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase : List[Any] = set(__magic_name__ ), [start] while stack: UpperCAmelCase : List[Any] = stack.pop() explored.add(__magic_name__ ) # Differences from BFS: # 1) pop last element instead of first one # 2) add adjacent elements to stack without exploring them for adj in reversed(graph[v] ): if adj not in explored: stack.append(__magic_name__ ) return explored a : int = { "A": ["B", "C", "D"], "B": ["A", "D", "E"], "C": ["A", "F"], "D": ["B", "D"], "E": ["B", "F"], "F": ["C", "E", "G"], "G": ["F"], } if __name__ == "__main__": import doctest doctest.testmod() print(depth_first_search(G, "A"))
311
'''simple docstring''' import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available from transformers.models.gpta.tokenization_gpta import GPTaTokenizer from transformers.testing_utils import require_keras_nlp, require_tf, slow if is_tf_available(): import tensorflow as tf if is_keras_nlp_available(): from transformers.models.gpta import TFGPTaTokenizer a : Tuple = ["gpt2"] a : Dict = "gpt2" if is_tf_available(): class UpperCamelCase__ ( tf.Module ): """simple docstring""" def __init__( self , snake_case ): '''simple docstring''' super().__init__() UpperCAmelCase : Tuple = tokenizer UpperCAmelCase : List[str] = AutoConfig.from_pretrained(snake_case ) UpperCAmelCase : int = TFGPTaLMHeadModel.from_config(snake_case ) @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="text" ),) ) def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = self.tokenizer(snake_case ) UpperCAmelCase : Optional[int] = tokenized["input_ids"].to_tensor() UpperCAmelCase : Optional[int] = tf.cast(input_ids_dense > 0 , tf.intaa ) # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN]) UpperCAmelCase : List[Any] = self.model(input_ids=snake_case , attention_mask=snake_case )["logits"] return outputs @require_tf @require_keras_nlp class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self ): '''simple docstring''' super().setUp() UpperCAmelCase : Any = [GPTaTokenizer.from_pretrained(snake_case ) for checkpoint in (TOKENIZER_CHECKPOINTS)] UpperCAmelCase : Optional[Any] = [TFGPTaTokenizer.from_pretrained(snake_case ) for checkpoint in TOKENIZER_CHECKPOINTS] assert len(self.tokenizers ) == len(self.tf_tokenizers ) UpperCAmelCase : Tuple = [ "This is a straightforward English test sentence.", "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.", "Now we're going to add some Chinese: 一 二 三 一二三", "And some much more rare Chinese: 齉 堃 齉堃", "Je vais aussi écrire en français pour tester les accents", "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ", ] UpperCAmelCase : Optional[Any] = list(zip(self.test_sentences , self.test_sentences[::-1] ) ) def A_ ( self ): '''simple docstring''' for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ): for test_inputs in self.test_sentences: UpperCAmelCase : List[Any] = tokenizer([test_inputs] , return_tensors="tf" ) UpperCAmelCase : Any = tf_tokenizer([test_inputs] ) for key in python_outputs.keys(): # convert them to numpy to avoid messing with ragged tensors UpperCAmelCase : Dict = python_outputs[key].numpy() UpperCAmelCase : List[str] = tf_outputs[key].numpy() self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) ) self.assertTrue(tf.reduce_all(tf.cast(snake_case , tf.intaa ) == tf_outputs_values ) ) @slow def A_ ( self ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: UpperCAmelCase : Optional[Any] = tf.function(snake_case ) for test_inputs in self.test_sentences: UpperCAmelCase : List[str] = tf.constant(snake_case ) UpperCAmelCase : Dict = compiled_tokenizer(snake_case ) UpperCAmelCase : Union[str, Any] = tf_tokenizer(snake_case ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def A_ ( self ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: UpperCAmelCase : int = ModelToSave(tokenizer=snake_case ) UpperCAmelCase : Tuple = tf.convert_to_tensor([self.test_sentences[0]] ) UpperCAmelCase : str = model.serving(snake_case ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: UpperCAmelCase : Optional[int] = Path(snake_case ) / "saved.model" tf.saved_model.save(snake_case , snake_case , signatures={"serving_default": model.serving} ) UpperCAmelCase : int = tf.saved_model.load(snake_case ) UpperCAmelCase : str = loaded_model.signatures["serving_default"](snake_case )["output_0"] # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertTrue(tf.reduce_all(out == loaded_output ) ) @slow def A_ ( self ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: UpperCAmelCase : Any = tf.convert_to_tensor([self.test_sentences[0]] ) UpperCAmelCase : Tuple = tf_tokenizer(snake_case ) # Build model with some sample inputs UpperCAmelCase : Union[str, Any] = tf_tokenizer.get_config() UpperCAmelCase : str = TFGPTaTokenizer.from_config(snake_case ) UpperCAmelCase : Tuple = model_from_config(snake_case ) for key in from_config_output.keys(): self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) ) @slow def A_ ( self ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: # for the test to run UpperCAmelCase : List[str] = 1_2_3_1_2_3 for max_length in [3, 5, 1_0_2_4]: UpperCAmelCase : Any = tf.convert_to_tensor([self.test_sentences[0]] ) UpperCAmelCase : Tuple = tf_tokenizer(snake_case , max_length=snake_case ) UpperCAmelCase : Union[str, Any] = out["input_ids"].numpy().shape[1] assert out_length == max_length
311
1
'''simple docstring''' import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : Dict = 3 UpperCAmelCase : Tuple = 2_5_0 UpperCAmelCase : Union[str, Any] = ids_tensor((batch_size, length) , snake_case ) UpperCAmelCase : int = torch.ones((batch_size, length) , device=snake_case , dtype=torch.float ) / length return input_ids, scores def A_ ( self ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase : Tuple = self._get_tensors(5 ) UpperCAmelCase : str = StoppingCriteriaList( [ MaxLengthCriteria(max_length=1_0 ), MaxTimeCriteria(max_time=0.1 ), ] ) self.assertFalse(criteria(snake_case , snake_case ) ) UpperCAmelCase , UpperCAmelCase : Optional[Any] = self._get_tensors(9 ) self.assertFalse(criteria(snake_case , snake_case ) ) UpperCAmelCase , UpperCAmelCase : Optional[Any] = self._get_tensors(1_0 ) self.assertTrue(criteria(snake_case , snake_case ) ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = MaxLengthCriteria(max_length=1_0 ) UpperCAmelCase , UpperCAmelCase : Dict = self._get_tensors(5 ) self.assertFalse(criteria(snake_case , snake_case ) ) UpperCAmelCase , UpperCAmelCase : int = self._get_tensors(9 ) self.assertFalse(criteria(snake_case , snake_case ) ) UpperCAmelCase , UpperCAmelCase : List[Any] = self._get_tensors(1_0 ) self.assertTrue(criteria(snake_case , snake_case ) ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[str] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 ) UpperCAmelCase , UpperCAmelCase : str = self._get_tensors(5 ) self.assertFalse(criteria(snake_case , snake_case ) ) UpperCAmelCase , UpperCAmelCase : Optional[Any] = self._get_tensors(9 ) self.assertFalse(criteria(snake_case , snake_case ) ) UpperCAmelCase , UpperCAmelCase : str = self._get_tensors(1_0 ) self.assertTrue(criteria(snake_case , snake_case ) ) UpperCAmelCase : str = StoppingCriteriaList([criteria] ) self.assertEqual(criteria_list.max_length , 1_0 ) def A_ ( self ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase : str = self._get_tensors(5 ) UpperCAmelCase : Optional[int] = MaxTimeCriteria(max_time=0.1 ) self.assertFalse(criteria(snake_case , snake_case ) ) UpperCAmelCase : Any = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 ) self.assertTrue(criteria(snake_case , snake_case ) ) def A_ ( self ): '''simple docstring''' validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_0 ) with self.assertWarns(snake_case ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_1 ) UpperCAmelCase : List[Any] = validate_stopping_criteria(StoppingCriteriaList() , 1_1 ) self.assertEqual(len(snake_case ) , 1 )
311
'''simple docstring''' import argparse from collections import defaultdict import yaml a : str = "docs/source/en/_toctree.yml" def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = defaultdict(__magic_name__ ) for doc in model_doc: counts[doc["local"]] += 1 UpperCAmelCase : List[Any] = [key for key, value in counts.items() if value > 1] UpperCAmelCase : Dict = [] for duplicate_key in duplicates: UpperCAmelCase : Union[str, Any] = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key} ) if len(__magic_name__ ) > 1: raise ValueError( F"{duplicate_key} is present several times in the documentation table of content at " "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the " "others." ) # Only add this once new_doc.append({"local": duplicate_key, "title": titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1] ) # Sort return sorted(__magic_name__ , key=lambda __magic_name__ : s["title"].lower() ) def lowercase ( __magic_name__=False ): '''simple docstring''' with open(__magic_name__ , encoding="utf-8" ) as f: UpperCAmelCase : Any = yaml.safe_load(f.read() ) # Get to the API doc UpperCAmelCase : Optional[int] = 0 while content[api_idx]["title"] != "API": api_idx += 1 UpperCAmelCase : Union[str, Any] = content[api_idx]["sections"] # Then to the model doc UpperCAmelCase : Any = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 UpperCAmelCase : str = api_doc[model_idx]["sections"] UpperCAmelCase : Any = [(idx, section) for idx, section in enumerate(__magic_name__ ) if "sections" in section] UpperCAmelCase : Optional[int] = False for idx, modality_doc in modalities_docs: UpperCAmelCase : int = modality_doc["sections"] UpperCAmelCase : int = clean_model_doc_toc(__magic_name__ ) if old_modality_doc != new_modality_doc: UpperCAmelCase : int = True if overwrite: UpperCAmelCase : Dict = new_modality_doc if diff: if overwrite: UpperCAmelCase : Any = model_doc UpperCAmelCase : Any = api_doc with open(__magic_name__ , "w" , encoding="utf-8" ) as f: f.write(yaml.dump(__magic_name__ , allow_unicode=__magic_name__ ) ) else: raise ValueError( "The model doc part of the table of content is not properly sorted, run `make style` to fix this." ) if __name__ == "__main__": a : Optional[Any] = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") a : Optional[Any] = parser.parse_args() check_model_doc(args.fix_and_overwrite)
311
1
'''simple docstring''' import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case , snake_case=1_3 , snake_case=7 , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=9_9 , snake_case=6_4 , snake_case=3_2 , snake_case=5 , snake_case=4 , snake_case=3_7 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=5_1_2 , snake_case=1_6 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , ): '''simple docstring''' UpperCAmelCase : List[str] = parent UpperCAmelCase : List[str] = batch_size UpperCAmelCase : Union[str, Any] = seq_length UpperCAmelCase : int = is_training UpperCAmelCase : Optional[int] = use_input_mask UpperCAmelCase : int = use_token_type_ids UpperCAmelCase : int = use_labels UpperCAmelCase : Optional[int] = vocab_size UpperCAmelCase : Any = hidden_size UpperCAmelCase : List[Any] = embedding_size UpperCAmelCase : str = num_hidden_layers UpperCAmelCase : List[str] = num_attention_heads UpperCAmelCase : List[str] = intermediate_size UpperCAmelCase : Tuple = hidden_act UpperCAmelCase : int = hidden_dropout_prob UpperCAmelCase : str = attention_probs_dropout_prob UpperCAmelCase : str = max_position_embeddings UpperCAmelCase : Optional[int] = type_vocab_size UpperCAmelCase : int = type_sequence_label_size UpperCAmelCase : Dict = initializer_range UpperCAmelCase : Dict = num_labels UpperCAmelCase : Any = num_choices UpperCAmelCase : int = scope def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : List[Any] = None if self.use_input_mask: UpperCAmelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase : Union[str, Any] = None if self.use_token_type_ids: UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase : Any = None UpperCAmelCase : Union[str, Any] = None UpperCAmelCase : Any = None if self.use_labels: UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase : List[str] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A_ ( self ): '''simple docstring''' return MegatronBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Tuple = MegatronBertModel(config=snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : Union[str, Any] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case ) UpperCAmelCase : List[str] = model(snake_case , token_type_ids=snake_case ) UpperCAmelCase : List[Any] = model(snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Optional[Any] = MegatronBertForMaskedLM(config=snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Optional[Any] = MegatronBertForCausalLM(config=snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : Optional[Any] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = MegatronBertForNextSentencePrediction(config=snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : Optional[int] = model( snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Optional[Any] = MegatronBertForPreTraining(config=snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : List[Any] = model( snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , next_sentence_label=snake_case , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Dict = MegatronBertForQuestionAnswering(config=snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : List[Any] = model( snake_case , attention_mask=snake_case , token_type_ids=snake_case , start_positions=snake_case , end_positions=snake_case , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Dict = self.num_labels UpperCAmelCase : Optional[Any] = MegatronBertForSequenceClassification(snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : Any = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Dict = self.num_labels UpperCAmelCase : Any = MegatronBertForTokenClassification(config=snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : Optional[int] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Optional[int] = self.num_choices UpperCAmelCase : Any = MegatronBertForMultipleChoice(config=snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase : str = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase : Tuple = model( snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[Any] = self.prepare_config_and_inputs() ( ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ) : Optional[Any] = config_and_inputs UpperCAmelCase : Tuple = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class UpperCamelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = ( ( MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ : Dict = ( { "feature-extraction": MegatronBertModel, "fill-mask": MegatronBertForMaskedLM, "question-answering": MegatronBertForQuestionAnswering, "text-classification": MegatronBertForSequenceClassification, "text-generation": MegatronBertForCausalLM, "token-classification": MegatronBertForTokenClassification, "zero-shot": MegatronBertForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ : Dict = True # test_resize_embeddings = False SCREAMING_SNAKE_CASE__ : List[Any] = False def A_ ( self , snake_case , snake_case , snake_case=False ): '''simple docstring''' UpperCAmelCase : str = super()._prepare_for_class(snake_case , snake_case , return_labels=snake_case ) if return_labels: if model_class in get_values(snake_case ): UpperCAmelCase : Union[str, Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=snake_case ) UpperCAmelCase : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case ) return inputs_dict def A_ ( self ): '''simple docstring''' UpperCAmelCase : int = MegatronBertModelTester(self ) UpperCAmelCase : Dict = ConfigTester(self , config_class=snake_case , hidden_size=3_7 ) def A_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_masked_lm(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_question_answering(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_token_classification(*snake_case ) def lowercase ( __magic_name__ ): '''simple docstring''' return torch.tensor( __magic_name__ , dtype=torch.long , device=__magic_name__ , ) a : Tuple = 1E-4 @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @slow @unittest.skip("Model is not available." ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = "nvidia/megatron-bert-uncased-345m" if "MYDIR" in os.environ: UpperCAmelCase : Union[str, Any] = os.path.join(os.environ["MYDIR"] , snake_case ) UpperCAmelCase : Any = MegatronBertModel.from_pretrained(snake_case ) model.to(snake_case ) model.half() UpperCAmelCase : Optional[int] = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] ) with torch.no_grad(): UpperCAmelCase : Union[str, Any] = model(snake_case )[0] UpperCAmelCase : Optional[Any] = torch.Size((1, 9, 1_0_2_4) ) self.assertEqual(output.shape , snake_case ) UpperCAmelCase : Any = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728] for ii in range(3 ): for jj in range(3 ): UpperCAmelCase : Union[str, Any] = output[0, ii, jj] UpperCAmelCase : Union[str, Any] = expected[3 * ii + jj] UpperCAmelCase : Any = "ii={} jj={} a={} b={}".format(snake_case , snake_case , snake_case , snake_case ) self.assertTrue(math.isclose(snake_case , snake_case , rel_tol=snake_case , abs_tol=snake_case ) , msg=snake_case )
311
'''simple docstring''' from datetime import datetime import matplotlib.pyplot as plt import torch def lowercase ( __magic_name__ ): '''simple docstring''' for param in module.parameters(): UpperCAmelCase : Any = False def lowercase ( ): '''simple docstring''' UpperCAmelCase : int = "cuda" if torch.cuda.is_available() else "cpu" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): UpperCAmelCase : int = "mps" if device == "mps": print( "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch" " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues" " with generations." ) return device def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : str = plt.imshow(__magic_name__ ) fig.axes.get_xaxis().set_visible(__magic_name__ ) fig.axes.get_yaxis().set_visible(__magic_name__ ) plt.show() def lowercase ( ): '''simple docstring''' UpperCAmelCase : str = datetime.now() UpperCAmelCase : Tuple = current_time.strftime("%H:%M:%S" ) return timestamp
311
1
'''simple docstring''' import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class UpperCamelCase__ ( lowercase__ ): """simple docstring""" def __init__( self , *snake_case , snake_case=None , snake_case=None , **snake_case ): '''simple docstring''' super().__init__(*snake_case , **snake_case ) UpperCAmelCase : Union[str, Any] = eval_examples UpperCAmelCase : int = post_process_function def A_ ( self , snake_case=None , snake_case=None , snake_case=None , snake_case = "eval" ): '''simple docstring''' UpperCAmelCase : str = self.eval_dataset if eval_dataset is None else eval_dataset UpperCAmelCase : Optional[int] = self.get_eval_dataloader(snake_case ) UpperCAmelCase : List[str] = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. UpperCAmelCase : Optional[Any] = self.compute_metrics UpperCAmelCase : Dict = None UpperCAmelCase : int = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop UpperCAmelCase : List[str] = time.time() try: UpperCAmelCase : List[str] = eval_loop( snake_case , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=snake_case , metric_key_prefix=snake_case , ) finally: UpperCAmelCase : str = compute_metrics UpperCAmelCase : Optional[int] = self.args.eval_batch_size * self.args.world_size if f"{metric_key_prefix}_jit_compilation_time" in output.metrics: start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"] output.metrics.update( speed_metrics( snake_case , snake_case , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default UpperCAmelCase : int = self.post_process_function(snake_case , snake_case , output.predictions ) UpperCAmelCase : Optional[int] = self.compute_metrics(snake_case ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"{metric_key_prefix}_" ): UpperCAmelCase : Union[str, Any] = metrics.pop(snake_case ) metrics.update(output.metrics ) else: UpperCAmelCase : Tuple = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(snake_case ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) UpperCAmelCase : str = self.callback_handler.on_evaluate(self.args , self.state , self.control , snake_case ) return metrics def A_ ( self , snake_case , snake_case , snake_case=None , snake_case = "test" ): '''simple docstring''' UpperCAmelCase : Optional[int] = self.get_test_dataloader(snake_case ) # Temporarily disable metric computation, we will do it in the loop here. UpperCAmelCase : Optional[int] = self.compute_metrics UpperCAmelCase : Union[str, Any] = None UpperCAmelCase : int = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop UpperCAmelCase : Optional[Any] = time.time() try: UpperCAmelCase : Dict = eval_loop( snake_case , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=snake_case , metric_key_prefix=snake_case , ) finally: UpperCAmelCase : Tuple = compute_metrics UpperCAmelCase : Dict = self.args.eval_batch_size * self.args.world_size if f"{metric_key_prefix}_jit_compilation_time" in output.metrics: start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"] output.metrics.update( speed_metrics( snake_case , snake_case , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output UpperCAmelCase : str = self.post_process_function(snake_case , snake_case , output.predictions , "predict" ) UpperCAmelCase : Dict = self.compute_metrics(snake_case ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"{metric_key_prefix}_" ): UpperCAmelCase : List[Any] = metrics.pop(snake_case ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=snake_case )
311
'''simple docstring''' import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import ( SeqaSeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) a : str = getLogger(__name__) def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = 8 , __magic_name__ = 1024 , __magic_name__="val" , __magic_name__=None , __magic_name__=False , __magic_name__="summarization" , __magic_name__=None , __magic_name__=1 , __magic_name__ = None , __magic_name__="" , **__magic_name__ , ): '''simple docstring''' UpperCAmelCase : List[Any] = str(__magic_name__ ) assert local_rank is not None torch.distributed.init_process_group(backend="nccl" , rank=__magic_name__ ) UpperCAmelCase : List[str] = Path(__magic_name__ ) UpperCAmelCase : Dict = save_dir.joinpath(F"rank_{local_rank}_output.json" ) torch.cuda.set_device(__magic_name__ ) UpperCAmelCase : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(__magic_name__ ).cuda() if fpaa: UpperCAmelCase : int = model.half() # determine if we need to increase num_beams use_task_specific_params(__magic_name__ , __magic_name__ ) # update config with task specific params UpperCAmelCase : Dict = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk? if num_return_sequences > num_beams: UpperCAmelCase : Optional[Any] = num_return_sequences UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(__magic_name__ ) logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type. if max_source_length is None: UpperCAmelCase : Any = tokenizer.model_max_length if prefix is None: UpperCAmelCase : Tuple = prefix or getattr(model.config , "prefix" , "" ) or "" UpperCAmelCase : Dict = SeqaSeqDataset( __magic_name__ , __magic_name__ , __magic_name__ , max_target_length=1024 , type_path=__magic_name__ , n_obs=__magic_name__ , prefix=__magic_name__ , **__magic_name__ , ) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. UpperCAmelCase : int = ds.make_sortish_sampler(__magic_name__ , distributed=__magic_name__ , add_extra_examples=__magic_name__ , shuffle=__magic_name__ ) UpperCAmelCase : List[Any] = DataLoader(__magic_name__ , sampler=__magic_name__ , batch_size=__magic_name__ , collate_fn=ds.collate_fn ) UpperCAmelCase : Any = [] for batch in tqdm(__magic_name__ ): UpperCAmelCase : List[Any] = model.generate( input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=__magic_name__ , num_beams=__magic_name__ , **__magic_name__ , ) UpperCAmelCase : Optional[int] = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ ) UpperCAmelCase : int = batch["ids"] if num_return_sequences > 1: UpperCAmelCase : List[Any] = chunks(__magic_name__ , __magic_name__ ) # batch size chunks, each of size num_return_seq for i, pred in enumerate(__magic_name__ ): results.append({"pred": pred, "id": ids[i].item()} ) save_json(__magic_name__ , __magic_name__ ) return results, sampler.num_replicas def lowercase ( ): '''simple docstring''' UpperCAmelCase : str = argparse.ArgumentParser( epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" ) parser.add_argument("--data_dir" , type=__magic_name__ , help="like cnn_dm/test.source" ) parser.add_argument( "--model_name" , type=__magic_name__ , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , ) parser.add_argument("--save_dir" , type=__magic_name__ , help="where to save" , default="tmp_gen" ) parser.add_argument("--max_source_length" , type=__magic_name__ , default=__magic_name__ ) parser.add_argument( "--type_path" , type=__magic_name__ , default="test" , help="which subset to evaluate typically train/val/test" ) parser.add_argument("--task" , type=__magic_name__ , default="summarization" , help="used for task_specific_params + metrics" ) parser.add_argument("--bs" , type=__magic_name__ , default=8 , required=__magic_name__ , help="batch size" ) parser.add_argument( "--local_rank" , type=__magic_name__ , default=-1 , required=__magic_name__ , help="should be passed by distributed.launch" ) parser.add_argument( "--n_obs" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ , help="How many observations. Defaults to all." ) parser.add_argument( "--num_return_sequences" , type=__magic_name__ , default=1 , required=__magic_name__ , help="How many sequences to return" ) parser.add_argument( "--sync_timeout" , type=__magic_name__ , default=600 , required=__magic_name__ , help="How long should master process wait for other processes to finish." , ) parser.add_argument("--src_lang" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ ) parser.add_argument("--tgt_lang" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ ) parser.add_argument( "--prefix" , type=__magic_name__ , required=__magic_name__ , default=__magic_name__ , help="will be added to the begininng of src examples" ) parser.add_argument("--fp16" , action="store_true" ) parser.add_argument("--debug" , action="store_true" ) UpperCAmelCase : Union[str, Any] = time.time() UpperCAmelCase , UpperCAmelCase : Dict = parser.parse_known_args() UpperCAmelCase : Tuple = parse_numeric_n_bool_cl_kwargs(__magic_name__ ) if generate_kwargs and args.local_rank <= 0: print(F"parsed the following generate kwargs: {generate_kwargs}" ) UpperCAmelCase : Union[str, Any] = Path(args.save_dir + "_tmp" ) Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) # this handles locking. UpperCAmelCase : List[Any] = list(json_save_dir.glob("rank_*.json" ) ) if intermediate_files: raise ValueError(F"Found files at {json_save_dir} please move or remove them." ) # In theory, a node could finish and save before another node hits this. If this happens, we can address later. UpperCAmelCase : Optional[Any] = {} if args.src_lang is not None: UpperCAmelCase : List[str] = args.src_lang if args.tgt_lang is not None: UpperCAmelCase : Dict = args.tgt_lang Path(args.save_dir ).mkdir(exist_ok=__magic_name__ ) UpperCAmelCase , UpperCAmelCase : str = eval_data_dir( args.data_dir , __magic_name__ , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=__magic_name__ , **__magic_name__ , ) if args.local_rank <= 0: UpperCAmelCase : List[str] = Path(args.save_dir ) save_dir.mkdir(exist_ok=__magic_name__ ) UpperCAmelCase : str = gather_results_from_each_node(__magic_name__ , __magic_name__ , args.sync_timeout ) UpperCAmelCase : Dict = combine_partial_results(__magic_name__ ) if args.num_return_sequences > 1: UpperCAmelCase : int = save_dir.joinpath("pseudolabel_results.json" ) print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" ) save_json(__magic_name__ , __magic_name__ ) return UpperCAmelCase : Dict = Path(args.data_dir ).joinpath(args.type_path + ".target" ) with open(__magic_name__ ) as f: UpperCAmelCase : Dict = [x.rstrip() for x in f.readlines()][: len(__magic_name__ )] # Calculate metrics, save metrics, and save _generations.txt UpperCAmelCase : Optional[int] = "translation" in args.task UpperCAmelCase : str = calculate_bleu if calc_bleu else calculate_rouge UpperCAmelCase : Tuple = "bleu" if calc_bleu else "rouge" UpperCAmelCase : Dict = score_fn(__magic_name__ , __magic_name__ ) UpperCAmelCase : Any = len(__magic_name__ ) UpperCAmelCase : Union[str, Any] = time.time() - start_time UpperCAmelCase : Dict = round(runtime / metrics["n_obs"] , 4 ) UpperCAmelCase : Optional[Any] = num_replicas # TODO(@stas00): add whatever metadata to metrics UpperCAmelCase : Dict = save_dir.joinpath(F"{args.type_path}_{metric_name}.json" ) save_json(__magic_name__ , __magic_name__ , indent=__magic_name__ ) print(__magic_name__ ) write_txt_file(__magic_name__ , save_dir.joinpath(F"{args.type_path}_generations.txt" ) ) if args.debug: write_txt_file(__magic_name__ , save_dir.joinpath(F"{args.type_path}.target" ) ) else: shutil.rmtree(__magic_name__ ) def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Tuple = [] for partial_result in partial_results: records.extend(__magic_name__ ) UpperCAmelCase : Optional[Any] = sorted(__magic_name__ , key=lambda __magic_name__ : x["id"] ) UpperCAmelCase : List[Any] = [x["pred"] for x in records] return preds def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = time.time() logger.info("waiting for all nodes to finish" ) UpperCAmelCase : Union[str, Any] = None while (time.time() - start_wait) < timeout: UpperCAmelCase : Dict = list(save_dir.glob("rank_*.json" ) ) if len(__magic_name__ ) < num_replicas: continue try: # make sure all json files are fully saved UpperCAmelCase : List[str] = lmap(__magic_name__ , __magic_name__ ) return json_data except JSONDecodeError: continue else: raise TimeoutError("Rank 0 gave up on waiting for other processes" ) # Unreachable if __name__ == "__main__": # Usage for MT: run_generate()
311
1
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import OwlViTImageProcessor, OwlViTProcessor @require_vision class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[str] = tempfile.mkdtemp() # fmt: off UpperCAmelCase : Optional[int] = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on UpperCAmelCase : Tuple = dict(zip(snake_case , range(len(snake_case ) ) ) ) UpperCAmelCase : Tuple = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""] UpperCAmelCase : List[str] = {"unk_token": "<unk>"} UpperCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(snake_case ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(snake_case ) ) UpperCAmelCase : List[str] = { "do_resize": True, "size": 2_0, "do_center_crop": True, "crop_size": 1_8, "do_normalize": True, "image_mean": [0.4814_5466, 0.457_8275, 0.4082_1073], "image_std": [0.2686_2954, 0.2613_0258, 0.2757_7711], } UpperCAmelCase : int = os.path.join(self.tmpdirname , snake_case ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(snake_case , snake_case ) def A_ ( self , **snake_case ): '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **snake_case ) def A_ ( self , **snake_case ): '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" , **snake_case ) def A_ ( self , **snake_case ): '''simple docstring''' return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **snake_case ) def A_ ( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] UpperCAmelCase : Union[str, Any] = [Image.fromarray(np.moveaxis(snake_case , 0 , -1 ) ) for x in image_inputs] return image_inputs def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[str] = self.get_tokenizer() UpperCAmelCase : int = self.get_rust_tokenizer() UpperCAmelCase : List[Any] = self.get_image_processor() UpperCAmelCase : str = OwlViTProcessor(tokenizer=snake_case , image_processor=snake_case ) processor_slow.save_pretrained(self.tmpdirname ) UpperCAmelCase : Union[str, Any] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case ) UpperCAmelCase : int = OwlViTProcessor(tokenizer=snake_case , image_processor=snake_case ) processor_fast.save_pretrained(self.tmpdirname ) UpperCAmelCase : Tuple = OwlViTProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , snake_case ) self.assertIsInstance(processor_fast.tokenizer , snake_case ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , snake_case ) self.assertIsInstance(processor_fast.image_processor , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase : int = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) UpperCAmelCase : List[str] = self.get_image_processor(do_normalize=snake_case ) UpperCAmelCase : Optional[int] = OwlViTProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=snake_case ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , snake_case ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[int] = self.get_image_processor() UpperCAmelCase : Optional[Any] = self.get_tokenizer() UpperCAmelCase : Optional[Any] = OwlViTProcessor(tokenizer=snake_case , image_processor=snake_case ) UpperCAmelCase : Optional[int] = self.prepare_image_inputs() UpperCAmelCase : Any = image_processor(snake_case , return_tensors="np" ) UpperCAmelCase : Union[str, Any] = processor(images=snake_case , return_tensors="np" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = self.get_image_processor() UpperCAmelCase : Any = self.get_tokenizer() UpperCAmelCase : str = OwlViTProcessor(tokenizer=snake_case , image_processor=snake_case ) UpperCAmelCase : Optional[int] = "lower newer" UpperCAmelCase : int = processor(text=snake_case , return_tensors="np" ) UpperCAmelCase : Dict = tokenizer(snake_case , return_tensors="np" ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = self.get_image_processor() UpperCAmelCase : List[str] = self.get_tokenizer() UpperCAmelCase : Union[str, Any] = OwlViTProcessor(tokenizer=snake_case , image_processor=snake_case ) UpperCAmelCase : Optional[Any] = "lower newer" UpperCAmelCase : Dict = self.prepare_image_inputs() UpperCAmelCase : str = processor(text=snake_case , images=snake_case ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(snake_case ): processor() def A_ ( self ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = "google/owlvit-base-patch32" UpperCAmelCase : int = OwlViTProcessor.from_pretrained(snake_case ) UpperCAmelCase : Union[str, Any] = ["cat", "nasa badge"] UpperCAmelCase : str = processor(text=snake_case ) UpperCAmelCase : Dict = 1_6 self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] ) self.assertEqual(inputs["input_ids"].shape , (2, seq_length) ) # test if it raises when no input is passed with pytest.raises(snake_case ): processor() def A_ ( self ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = "google/owlvit-base-patch32" UpperCAmelCase : Tuple = OwlViTProcessor.from_pretrained(snake_case ) UpperCAmelCase : int = [["cat", "nasa badge"], ["person"]] UpperCAmelCase : str = processor(text=snake_case ) UpperCAmelCase : Tuple = 1_6 UpperCAmelCase : Union[str, Any] = len(snake_case ) UpperCAmelCase : Tuple = max([len(snake_case ) for texts in input_texts] ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] ) self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) ) # test if it raises when no input is passed with pytest.raises(snake_case ): processor() def A_ ( self ): '''simple docstring''' UpperCAmelCase : int = "google/owlvit-base-patch32" UpperCAmelCase : Optional[int] = OwlViTProcessor.from_pretrained(snake_case ) UpperCAmelCase : Any = ["cat", "nasa badge"] UpperCAmelCase : str = processor(text=snake_case ) UpperCAmelCase : Union[str, Any] = 1_6 UpperCAmelCase : str = inputs["input_ids"] UpperCAmelCase : Union[str, Any] = [ [4_9_4_0_6, 2_3_6_8, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_9_4_0_6, 6_8_4_1, 1_1_3_0_1, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] ) self.assertEqual(inputs["input_ids"].shape , (2, seq_length) ) self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] ) self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = self.get_image_processor() UpperCAmelCase : int = self.get_tokenizer() UpperCAmelCase : Any = OwlViTProcessor(tokenizer=snake_case , image_processor=snake_case ) UpperCAmelCase : List[str] = self.prepare_image_inputs() UpperCAmelCase : Tuple = self.prepare_image_inputs() UpperCAmelCase : List[Any] = processor(images=snake_case , query_images=snake_case ) self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(snake_case ): processor() def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = self.get_image_processor() UpperCAmelCase : Any = self.get_tokenizer() UpperCAmelCase : List[str] = OwlViTProcessor(tokenizer=snake_case , image_processor=snake_case ) UpperCAmelCase : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] UpperCAmelCase : Tuple = processor.batch_decode(snake_case ) UpperCAmelCase : Optional[int] = tokenizer.batch_decode(snake_case ) self.assertListEqual(snake_case , snake_case )
311
'''simple docstring''' import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() a : List[str] = logging.get_logger(__name__) a : Optional[Any] = ["model.decoder.embed_positions.weights"] def lowercase ( __magic_name__ ): '''simple docstring''' if "emb" in name: UpperCAmelCase : str = name.replace("emb" , "model.decoder.embed_tokens" ) if "transformer" in name: UpperCAmelCase : List[str] = name.replace("transformer" , "model.decoder" ) if "cross_attention" in name: UpperCAmelCase : int = name.replace("cross_attention" , "encoder_attn" ) if "linear1" in name: UpperCAmelCase : List[Any] = name.replace("linear1" , "fc1" ) if "linear2" in name: UpperCAmelCase : int = name.replace("linear2" , "fc2" ) if "norm1" in name: UpperCAmelCase : Dict = name.replace("norm1" , "self_attn_layer_norm" ) if "norm_cross" in name: UpperCAmelCase : Any = name.replace("norm_cross" , "encoder_attn_layer_norm" ) if "norm2" in name: UpperCAmelCase : Union[str, Any] = name.replace("norm2" , "final_layer_norm" ) if "out_norm" in name: UpperCAmelCase : Dict = name.replace("out_norm" , "model.decoder.layer_norm" ) if "linears" in name: UpperCAmelCase : List[Any] = name.replace("linears" , "lm_heads" ) if "condition_provider.conditioners.description.output_proj" in name: UpperCAmelCase : Any = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" ) return name def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Any = list(state_dict.keys() ) UpperCAmelCase : List[Any] = {} for key in keys: UpperCAmelCase : Any = state_dict.pop(__magic_name__ ) UpperCAmelCase : str = rename_keys(__magic_name__ ) if "in_proj_weight" in key: # split fused qkv proj UpperCAmelCase : Optional[int] = val[:hidden_size, :] UpperCAmelCase : Optional[Any] = val[hidden_size : 2 * hidden_size, :] UpperCAmelCase : Optional[Any] = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: UpperCAmelCase : str = val else: UpperCAmelCase : int = val return state_dict, enc_dec_proj_state_dict def lowercase ( __magic_name__ ): '''simple docstring''' if checkpoint == "small": # default config values UpperCAmelCase : List[Any] = 1024 UpperCAmelCase : Tuple = 24 UpperCAmelCase : Union[str, Any] = 16 elif checkpoint == "medium": UpperCAmelCase : List[Any] = 1536 UpperCAmelCase : Optional[Any] = 48 UpperCAmelCase : List[str] = 24 elif checkpoint == "large": UpperCAmelCase : List[Any] = 2048 UpperCAmelCase : str = 48 UpperCAmelCase : Optional[Any] = 32 else: raise ValueError(F"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." ) UpperCAmelCase : Tuple = MusicgenDecoderConfig( hidden_size=__magic_name__ , ffn_dim=hidden_size * 4 , num_hidden_layers=__magic_name__ , num_attention_heads=__magic_name__ , ) return config @torch.no_grad() def lowercase ( __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__="cpu" ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = MusicGen.get_pretrained(__magic_name__ , device=__magic_name__ ) UpperCAmelCase : List[str] = decoder_config_from_checkpoint(__magic_name__ ) UpperCAmelCase : Dict = fairseq_model.lm.state_dict() UpperCAmelCase , UpperCAmelCase : List[str] = rename_state_dict( __magic_name__ , hidden_size=decoder_config.hidden_size ) UpperCAmelCase : Any = TaEncoderModel.from_pretrained("t5-base" ) UpperCAmelCase : Any = EncodecModel.from_pretrained("facebook/encodec_32khz" ) UpperCAmelCase : int = MusicgenForCausalLM(__magic_name__ ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection UpperCAmelCase , UpperCAmelCase : Optional[int] = decoder.load_state_dict(__magic_name__ , strict=__magic_name__ ) for key in missing_keys.copy(): if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(__magic_name__ ) if len(__magic_name__ ) > 0: raise ValueError(F"Missing key(s) in state_dict: {missing_keys}" ) if len(__magic_name__ ) > 0: raise ValueError(F"Unexpected key(s) in state_dict: {unexpected_keys}" ) # init the composite model UpperCAmelCase : List[Any] = MusicgenForConditionalGeneration(text_encoder=__magic_name__ , audio_encoder=__magic_name__ , decoder=__magic_name__ ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(__magic_name__ ) # check we can do a forward pass UpperCAmelCase : Union[str, Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) UpperCAmelCase : Optional[Any] = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): UpperCAmelCase : str = model(input_ids=__magic_name__ , decoder_input_ids=__magic_name__ ).logits if logits.shape != (8, 1, 2048): raise ValueError("Incorrect shape for logits" ) # now construct the processor UpperCAmelCase : Dict = AutoTokenizer.from_pretrained("t5-base" ) UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" ) UpperCAmelCase : Dict = MusicgenProcessor(feature_extractor=__magic_name__ , tokenizer=__magic_name__ ) # set the appropriate bos/pad token ids UpperCAmelCase : List[Any] = 2048 UpperCAmelCase : Tuple = 2048 # set other default generation config params UpperCAmelCase : Tuple = int(30 * audio_encoder.config.frame_rate ) UpperCAmelCase : str = True UpperCAmelCase : Tuple = 3.0 if pytorch_dump_folder is not None: Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) logger.info(F"Saving model {checkpoint} to {pytorch_dump_folder}" ) model.save_pretrained(__magic_name__ ) processor.save_pretrained(__magic_name__ ) if repo_id: logger.info(F"Pushing model {checkpoint} to {repo_id}" ) model.push_to_hub(__magic_name__ ) processor.push_to_hub(__magic_name__ ) if __name__ == "__main__": a : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint", default="small", type=str, help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.", ) parser.add_argument( "--pytorch_dump_folder", required=True, default=None, type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) parser.add_argument( "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda." ) a : int = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
311
1
'''simple docstring''' import math import numpy as np import qiskit from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute def lowercase ( __magic_name__ = 3 ): '''simple docstring''' if isinstance(__magic_name__ , __magic_name__ ): raise TypeError("number of qubits must be a integer." ) if number_of_qubits <= 0: raise ValueError("number of qubits must be > 0." ) if math.floor(__magic_name__ ) != number_of_qubits: raise ValueError("number of qubits must be exact integer." ) if number_of_qubits > 10: raise ValueError("number of qubits too large to simulate(>10)." ) UpperCAmelCase : Dict = QuantumRegister(__magic_name__ , "qr" ) UpperCAmelCase : Tuple = ClassicalRegister(__magic_name__ , "cr" ) UpperCAmelCase : Tuple = QuantumCircuit(__magic_name__ , __magic_name__ ) UpperCAmelCase : Union[str, Any] = number_of_qubits for i in range(__magic_name__ ): quantum_circuit.h(number_of_qubits - i - 1 ) counter -= 1 for j in range(__magic_name__ ): quantum_circuit.cp(np.pi / 2 ** (counter - j) , __magic_name__ , __magic_name__ ) for k in range(number_of_qubits // 2 ): quantum_circuit.swap(__magic_name__ , number_of_qubits - k - 1 ) # measure all the qubits quantum_circuit.measure(__magic_name__ , __magic_name__ ) # simulate with 10000 shots UpperCAmelCase : int = Aer.get_backend("qasm_simulator" ) UpperCAmelCase : int = execute(__magic_name__ , __magic_name__ , shots=1_0000 ) return job.result().get_counts(__magic_name__ ) if __name__ == "__main__": print( F'Total count for quantum fourier transform state is: \ {quantum_fourier_transform(3)}' )
311
'''simple docstring''' import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[str] = inspect.getfile(accelerate.test_utils ) UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] ) UpperCAmelCase : Optional[int] = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] ) UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] ) @require_multi_gpu def A_ ( self ): '''simple docstring''' print(f"Found {torch.cuda.device_count()} devices." ) UpperCAmelCase : Any = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case , env=os.environ.copy() ) @require_multi_gpu def A_ ( self ): '''simple docstring''' print(f"Found {torch.cuda.device_count()} devices." ) UpperCAmelCase : Tuple = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path] print(f"Command: {cmd}" ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case , env=os.environ.copy() ) @require_multi_gpu def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case , env=os.environ.copy() ) @require_multi_gpu def A_ ( self ): '''simple docstring''' print(f"Found {torch.cuda.device_count()} devices, using 2 devices only" ) UpperCAmelCase : str = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ): execute_subprocess_async(snake_case , env=os.environ.copy() ) if __name__ == "__main__": a : Union[str, Any] = Accelerator() a : str = (accelerator.state.process_index + 2, 10) a : List[str] = torch.randint(0, 10, shape).to(accelerator.device) a : Optional[int] = "" a : int = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." a : List[Any] = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." a : List[str] = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
311
1
'''simple docstring''' # Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available a : List[Any] = { "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"], "tokenization_cpmant": ["CpmAntTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : List[Any] = [ "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST", "CpmAntForCausalLM", "CpmAntModel", "CpmAntPreTrainedModel", ] if TYPE_CHECKING: from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig from .tokenization_cpmant import CpmAntTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_cpmant import ( CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST, CpmAntForCausalLM, CpmAntModel, CpmAntPreTrainedModel, ) else: import sys a : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
311
'''simple docstring''' import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class UpperCamelCase__ : """simple docstring""" @staticmethod def A_ ( *snake_case , **snake_case ): '''simple docstring''' pass @is_pipeline_test @require_vision @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def A_ ( self , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : str = pipeline( "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" ) UpperCAmelCase : Union[str, Any] = [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ] return object_detector, examples def A_ ( self , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : List[Any] = object_detector(examples[0] , threshold=0.0 ) UpperCAmelCase : Dict = len(snake_case ) self.assertGreater(snake_case , 0 ) self.assertEqual( snake_case , [ { "score": ANY(snake_case ), "label": ANY(snake_case ), "box": {"xmin": ANY(snake_case ), "ymin": ANY(snake_case ), "xmax": ANY(snake_case ), "ymax": ANY(snake_case )}, } for i in range(snake_case ) ] , ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF" ) def A_ ( self ): '''simple docstring''' pass @require_torch def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = pipeline( "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" ) UpperCAmelCase : Optional[Any] = object_detector( "./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ {"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}}, {"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}}, {"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}}, ] , ) UpperCAmelCase : Tuple = object_detector( [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ] , threshold=0.64 , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ [ {"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}}, {"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}}, {"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}}, ] ] , ) @require_torch @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = pipeline("zero-shot-object-detection" ) UpperCAmelCase : Optional[int] = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}}, ] , ) UpperCAmelCase : Union[str, Any] = object_detector( [ { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, ] , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}}, ], [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}}, ], ] , ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF" ) def A_ ( self ): '''simple docstring''' pass @require_torch @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = 0.2 UpperCAmelCase : Union[str, Any] = pipeline("zero-shot-object-detection" ) UpperCAmelCase : str = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=snake_case , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}}, ] , ) @require_torch @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = 2 UpperCAmelCase : Optional[Any] = pipeline("zero-shot-object-detection" ) UpperCAmelCase : List[str] = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=snake_case , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, ] , )
311
1
'''simple docstring''' import math def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Optional[int] = [True] * n UpperCAmelCase : Tuple = False UpperCAmelCase : str = False UpperCAmelCase : Any = True for i in range(3 , int(n**0.5 + 1 ) , 2 ): UpperCAmelCase : Union[str, Any] = i * 2 while index < n: UpperCAmelCase : Dict = False UpperCAmelCase : int = index + i UpperCAmelCase : Union[str, Any] = [2] for i in range(3 , __magic_name__ , 2 ): if is_prime[i]: primes.append(__magic_name__ ) return primes def lowercase ( __magic_name__ = 9999_6666_3333 ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = math.floor(math.sqrt(__magic_name__ ) ) + 100 UpperCAmelCase : List[Any] = prime_sieve(__magic_name__ ) UpperCAmelCase : Tuple = 0 UpperCAmelCase : str = 0 UpperCAmelCase : Optional[Any] = primes[prime_index] while (last_prime**2) <= limit: UpperCAmelCase : Optional[int] = primes[prime_index + 1] UpperCAmelCase : Union[str, Any] = last_prime**2 UpperCAmelCase : List[Any] = next_prime**2 # Get numbers divisible by lps(current) UpperCAmelCase : int = lower_bound + last_prime while upper_bound > current <= limit: matches_sum += current current += last_prime # Reset the upper_bound while (upper_bound - next_prime) > limit: upper_bound -= next_prime # Add the numbers divisible by ups(current) UpperCAmelCase : List[str] = upper_bound - next_prime while current > lower_bound: matches_sum += current current -= next_prime # Remove the numbers divisible by both ups and lps UpperCAmelCase : Union[str, Any] = 0 while upper_bound > current <= limit: if current <= lower_bound: # Increment the current number current += last_prime * next_prime continue if current > limit: break # Remove twice since it was added by both ups and lps matches_sum -= current * 2 # Increment the current number current += last_prime * next_prime # Setup for next pair UpperCAmelCase : List[Any] = next_prime prime_index += 1 return matches_sum if __name__ == "__main__": print(solution())
311
'''simple docstring''' def lowercase ( __magic_name__ ): '''simple docstring''' if number > 0: raise ValueError("input must be a negative integer" ) UpperCAmelCase : List[Any] = len(bin(__magic_name__ )[3:] ) UpperCAmelCase : Optional[Any] = bin(abs(__magic_name__ ) - (1 << binary_number_length) )[3:] UpperCAmelCase : Tuple = ( ( "1" + "0" * (binary_number_length - len(__magic_name__ )) + twos_complement_number ) if number < 0 else "0" ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
311
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices a : Union[str, Any] = logging.get_logger(__name__) a : str = { "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json", } class UpperCamelCase__ ( lowercase__ , lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = "convnextv2" def __init__( self , snake_case=3 , snake_case=4 , snake_case=4 , snake_case=None , snake_case=None , snake_case="gelu" , snake_case=0.02 , snake_case=1e-12 , snake_case=0.0 , snake_case=2_2_4 , snake_case=None , snake_case=None , **snake_case , ): '''simple docstring''' super().__init__(**snake_case ) UpperCAmelCase : str = num_channels UpperCAmelCase : List[Any] = patch_size UpperCAmelCase : List[Any] = num_stages UpperCAmelCase : Any = [9_6, 1_9_2, 3_8_4, 7_6_8] if hidden_sizes is None else hidden_sizes UpperCAmelCase : Dict = [3, 3, 9, 3] if depths is None else depths UpperCAmelCase : Tuple = hidden_act UpperCAmelCase : str = initializer_range UpperCAmelCase : str = layer_norm_eps UpperCAmelCase : Any = drop_path_rate UpperCAmelCase : List[str] = image_size UpperCAmelCase : List[Any] = ["stem"] + [f"stage{idx}" for idx in range(1 , len(self.depths ) + 1 )] UpperCAmelCase , UpperCAmelCase : List[str] = get_aligned_output_features_output_indices( out_features=snake_case , out_indices=snake_case , stage_names=self.stage_names )
311
'''simple docstring''' from collections import Counter import numpy as np from sklearn import datasets from sklearn.model_selection import train_test_split a : int = datasets.load_iris() a : Union[str, Any] = np.array(data["data"]) a : Optional[Any] = np.array(data["target"]) a : List[Any] = data["target_names"] a , a , a , a : Dict = train_test_split(X, y) def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' return np.linalg.norm(np.array(__magic_name__ ) - np.array(__magic_name__ ) ) def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=5 ): '''simple docstring''' UpperCAmelCase : int = zip(__magic_name__ , __magic_name__ ) # List of distances of all points from the point to be classified UpperCAmelCase : List[Any] = [] for data_point in data: UpperCAmelCase : List[str] = euclidean_distance(data_point[0] , __magic_name__ ) distances.append((distance, data_point[1]) ) # Choosing 'k' points with the least distances. UpperCAmelCase : Union[str, Any] = [i[1] for i in sorted(__magic_name__ )[:k]] # Most commonly occurring class among them # is the class into which the point is classified UpperCAmelCase : List[str] = Counter(__magic_name__ ).most_common(1 )[0][0] return classes[result] if __name__ == "__main__": print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
311
1
'''simple docstring''' import argparse import json import os import tensorstore as ts import torch from flax import serialization from flax.traverse_util import flatten_dict, unflatten_dict from tensorflow.io import gfile from transformers.modeling_utils import dtype_byte_size from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import ( rename_keys, ) from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME from transformers.utils.hub import convert_file_size_to_int def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3: # expert layer UpperCAmelCase : Any = flax_key_tuple[:-1] + ("weight",) UpperCAmelCase : Tuple = torch.permute(__magic_name__ , (0, 2, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(__magic_name__ ): # linear layer UpperCAmelCase : Dict = flax_key_tuple[:-1] + ("weight",) UpperCAmelCase : List[str] = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: UpperCAmelCase : Union[str, Any] = flax_key_tuple[:-1] + ("weight",) return flax_key_tuple, flax_tensor def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' if "metadata" in layer: UpperCAmelCase : Any = layer.split("metadata" ) UpperCAmelCase : Dict = "".join(split_layer[0] )[:-1] UpperCAmelCase : Optional[int] = [tuple(("metadata" + split_layer[1]).split("/" ) )] elif "kvstore" in layer: UpperCAmelCase : List[Any] = layer.split("kvstore" ) UpperCAmelCase : List[str] = "".join(split_layer[0] )[:-1] UpperCAmelCase : List[str] = [tuple(("kvstore" + split_layer[1]).split("/" ) )] else: UpperCAmelCase : Any = layer.split("/" ) UpperCAmelCase : List[Any] = "/".join(split_layer[:-1] ) UpperCAmelCase : int = (split_layer[-1],) if "kvstore/path" in layer: UpperCAmelCase : Dict = F"{switch_checkpoint_path}/{checkpoint_info[layer]}" elif "kvstore/driver" in layer: UpperCAmelCase : Union[str, Any] = "file" else: UpperCAmelCase : Optional[Any] = checkpoint_info[layer] return curr_real_layer_name, split_layer, content def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : str = rename_keys(__magic_name__ ) UpperCAmelCase : List[str] = {} for k, v in current_block.items(): UpperCAmelCase : List[Any] = v UpperCAmelCase : Any = new_current_block torch.save(__magic_name__ , __magic_name__ ) def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = WEIGHTS_NAME ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = convert_file_size_to_int(__magic_name__ ) UpperCAmelCase : Union[str, Any] = [] UpperCAmelCase : Optional[int] = {} UpperCAmelCase : int = 0 UpperCAmelCase : Optional[int] = 0 os.makedirs(__magic_name__ , exist_ok=__magic_name__ ) with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp: UpperCAmelCase : List[str] = serialization.msgpack_restore(fp.read() )["optimizer"]["target"] UpperCAmelCase : Optional[int] = flatten_dict(__magic_name__ , sep="/" ) UpperCAmelCase : Tuple = {} for layer in checkpoint_info.keys(): UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = get_key_and_tensorstore_dict( __magic_name__ , __magic_name__ , __magic_name__ ) if curr_real_layer_name in all_layers: UpperCAmelCase : Optional[int] = content else: UpperCAmelCase : Tuple = {split_layer[-1]: content} for key in all_layers.keys(): # open tensorstore file UpperCAmelCase : Optional[int] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result() UpperCAmelCase : List[Any] = torch.tensor(__magic_name__ ) UpperCAmelCase : Union[str, Any] = raw_weights.numel() * dtype_byte_size(raw_weights.dtype ) # use the renaming pattern from the small conversion scripts UpperCAmelCase , UpperCAmelCase : Any = rename_base_flax_keys(tuple(key.split("/" ) ) , __magic_name__ ) UpperCAmelCase : List[Any] = "/".join(__magic_name__ ) # If this weight is going to tip up over the maximal size, we split. if current_block_size + weight_size > max_shard_size: UpperCAmelCase : Optional[Any] = os.path.join( __magic_name__ , weights_name.replace(".bin" , F"-{len(__magic_name__ )+1:05d}-of-???.bin" ) ) rename_and_save_block(__magic_name__ , __magic_name__ ) sharded_state_dicts.append(current_block.keys() ) del current_block UpperCAmelCase : Optional[int] = {} UpperCAmelCase : int = 0 UpperCAmelCase : Any = raw_weights.to(getattr(__magic_name__ , __magic_name__ ) ) current_block_size += weight_size total_size += weight_size # Add the last block UpperCAmelCase : Optional[Any] = os.path.join(__magic_name__ , weights_name.replace(".bin" , F"-{len(__magic_name__ )+1:05d}-of-???.bin" ) ) rename_and_save_block(__magic_name__ , __magic_name__ ) sharded_state_dicts.append(current_block.keys() ) # If we only have one shard, we return it if len(__magic_name__ ) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index UpperCAmelCase : str = {} UpperCAmelCase : str = {} for idx, shard in enumerate(__magic_name__ ): UpperCAmelCase : Union[str, Any] = weights_name.replace( ".bin" , F"-{idx+1:05d}-of-{len(__magic_name__ ):05d}.bin" ) # len(sharded_state_dicts):05d} UpperCAmelCase : List[Any] = os.path.join(__magic_name__ , weights_name.replace(".bin" , F"-{idx+1:05d}-of-???.bin" ) ) os.rename(__magic_name__ , os.path.join(__magic_name__ , __magic_name__ ) ) UpperCAmelCase : Tuple = shard for key in shard: UpperCAmelCase : Tuple = shard_file # Add the metadata UpperCAmelCase : List[str] = {"total_size": total_size} UpperCAmelCase : Union[str, Any] = {"metadata": metadata, "weight_map": weight_map} with open(os.path.join(__magic_name__ , __magic_name__ ) , "w" , encoding="utf-8" ) as f: UpperCAmelCase : Any = json.dumps(__magic_name__ , indent=2 , sort_keys=__magic_name__ ) + "\n" f.write(__magic_name__ ) return metadata, index if __name__ == "__main__": a : int = argparse.ArgumentParser() # Required parameters parser.add_argument( "--switch_t5x_checkpoint_path", default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600", type=str, required=False, help="Path to a directory containing a folder per layer. Follows the original Google format.", ) parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size") parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model") parser.add_argument( "--pytorch_dump_folder_path", default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted", type=str, required=False, help="Path to the output pytorch model.", ) a : str = parser.parse_args() shard_on_the_fly( args.switch_tax_checkpoint_path, args.pytorch_dump_folder_path, args.max_shard_size, args.dtype, ) def lowercase ( ): '''simple docstring''' from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer UpperCAmelCase : str = SwitchTransformersConfig.from_pretrained("google/switch-base-8" ) config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" ) UpperCAmelCase : Optional[Any] = SwitchTransformersForConditionalGeneration.from_pretrained( "/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" ) UpperCAmelCase : Optional[Any] = TaTokenizer.from_pretrained("t5-small" ) UpperCAmelCase : Optional[int] = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>." UpperCAmelCase : Any = tokenizer(__magic_name__ , return_tensors="pt" ).input_ids UpperCAmelCase : List[str] = model.generate(__magic_name__ , decoder_start_token_id=0 ) print(tokenizer.decode(out[0] ) )
311
'''simple docstring''' def lowercase ( __magic_name__ ): '''simple docstring''' if number < 0: raise ValueError("number must not be negative" ) return number & (number - 1) == 0 if __name__ == "__main__": import doctest doctest.testmod()
311
1
'''simple docstring''' import colorsys from PIL import Image # type: ignore def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Optional[Any] = x UpperCAmelCase : Optional[int] = y for step in range(__magic_name__ ): # noqa: B007 UpperCAmelCase : Optional[Any] = a * a - b * b + x UpperCAmelCase : List[str] = 2 * a * b + y UpperCAmelCase : int = a_new # divergence happens for all complex number with an absolute value # greater than 4 if a * a + b * b > 4: break return step / (max_step - 1) def lowercase ( __magic_name__ ): '''simple docstring''' if distance == 1: return (0, 0, 0) else: return (255, 255, 255) def lowercase ( __magic_name__ ): '''simple docstring''' if distance == 1: return (0, 0, 0) else: return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(__magic_name__ , 1 , 1 ) ) def lowercase ( __magic_name__ = 800 , __magic_name__ = 600 , __magic_name__ = -0.6 , __magic_name__ = 0 , __magic_name__ = 3.2 , __magic_name__ = 50 , __magic_name__ = True , ): '''simple docstring''' UpperCAmelCase : int = Image.new("RGB" , (image_width, image_height) ) UpperCAmelCase : List[str] = img.load() # loop through the image-coordinates for image_x in range(__magic_name__ ): for image_y in range(__magic_name__ ): # determine the figure-coordinates based on the image-coordinates UpperCAmelCase : Tuple = figure_width / image_width * image_height UpperCAmelCase : Any = figure_center_x + (image_x / image_width - 0.5) * figure_width UpperCAmelCase : Dict = figure_center_y + (image_y / image_height - 0.5) * figure_height UpperCAmelCase : Union[str, Any] = get_distance(__magic_name__ , __magic_name__ , __magic_name__ ) # color the corresponding pixel based on the selected coloring-function if use_distance_color_coding: UpperCAmelCase : Optional[Any] = get_color_coded_rgb(__magic_name__ ) else: UpperCAmelCase : str = get_black_and_white_rgb(__magic_name__ ) return img if __name__ == "__main__": import doctest doctest.testmod() # colored version, full figure a : Dict = get_image() # uncomment for colored version, different section, zoomed in # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4, # figure_width = 0.8) # uncomment for black and white version, full figure # img = get_image(use_distance_color_coding = False) # uncomment to save the image # img.save("mandelbrot.png") img.show()
311
'''simple docstring''' import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def lowercase ( __magic_name__ , __magic_name__=10 ): '''simple docstring''' UpperCAmelCase : Tuple = [] for _ in range(__magic_name__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def lowercase ( __magic_name__ , __magic_name__=10 ): '''simple docstring''' UpperCAmelCase : List[str] = [] for step in range(__magic_name__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase : Any = os.path.join(__magic_name__ , "schedule.bin" ) torch.save(scheduler.state_dict() , __magic_name__ ) UpperCAmelCase : Any = torch.load(__magic_name__ ) scheduler.load_state_dict(__magic_name__ ) return lrs @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self , snake_case , snake_case , snake_case ): '''simple docstring''' self.assertEqual(len(snake_case ) , len(snake_case ) ) for a, b in zip(snake_case , snake_case ): self.assertAlmostEqual(snake_case , snake_case , delta=snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case ) UpperCAmelCase : Any = torch.tensor([0.4, 0.2, -0.5] ) UpperCAmelCase : Any = nn.MSELoss() # No warmup, constant schedule, no gradient clipping UpperCAmelCase : List[str] = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 ) for _ in range(1_0_0 ): UpperCAmelCase : List[Any] = criterion(snake_case , snake_case ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case ) UpperCAmelCase : int = torch.tensor([0.4, 0.2, -0.5] ) UpperCAmelCase : str = nn.MSELoss() # No warmup, constant schedule, no gradient clipping UpperCAmelCase : str = Adafactor( params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=snake_case , weight_decay=0.0 , relative_step=snake_case , scale_parameter=snake_case , warmup_init=snake_case , ) for _ in range(1_0_0_0 ): UpperCAmelCase : str = criterion(snake_case , snake_case ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = nn.Linear(50 , 50 ) if is_torch_available() else None SCREAMING_SNAKE_CASE__ : List[Any] = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None SCREAMING_SNAKE_CASE__ : Optional[int] = 10 def A_ ( self , snake_case , snake_case , snake_case , snake_case=None ): '''simple docstring''' self.assertEqual(len(snake_case ) , len(snake_case ) ) for a, b in zip(snake_case , snake_case ): self.assertAlmostEqual(snake_case , snake_case , delta=snake_case , msg=snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : int = {"num_warmup_steps": 2, "num_training_steps": 1_0} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) UpperCAmelCase : int = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {"num_warmup_steps": 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, "num_cycles": 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, "power": 2.0, "lr_end": 1e-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {"num_warmup_steps": 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): UpperCAmelCase , UpperCAmelCase : Any = data UpperCAmelCase : Tuple = scheduler_func(self.optimizer , **snake_case ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) UpperCAmelCase : List[str] = unwrap_schedule(snake_case , self.num_steps ) self.assertListAlmostEqual( snake_case , snake_case , tol=1e-2 , msg=f"failed for {scheduler_func} in normal scheduler" , ) UpperCAmelCase : Optional[Any] = scheduler_func(self.optimizer , **snake_case ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(snake_case ) # wrap to test picklability of the schedule UpperCAmelCase : Tuple = unwrap_and_save_reload_schedule(snake_case , self.num_steps ) self.assertListEqual(snake_case , snake_case , msg=f"failed for {scheduler_func} in save and reload" ) class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case ): '''simple docstring''' UpperCAmelCase : List[str] = fn def __call__( self , *snake_case , **snake_case ): '''simple docstring''' return self.fn(*snake_case , **snake_case ) @classmethod def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : Optional[int] = list(map(self , scheduler.lr_lambdas ) )
311
1
'''simple docstring''' import os import sys import unittest a : List[str] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) a : List[Any] = os.path.join("tests", "models", "bert", "test_modeling_bert.py") a : Union[str, Any] = os.path.join("tests", "models", "blip", "test_modeling_blip.py") class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[Any] = get_test_to_tester_mapping(snake_case ) UpperCAmelCase : Union[str, Any] = get_test_to_tester_mapping(snake_case ) UpperCAmelCase : int = {"BertModelTest": "BertModelTester"} UpperCAmelCase : Union[str, Any] = { "BlipModelTest": "BlipModelTester", "BlipTextImageModelTest": "BlipTextImageModelsModelTester", "BlipTextModelTest": "BlipTextModelTester", "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester", "BlipVQAModelTest": "BlipVQAModelTester", "BlipVisionModelTest": "BlipVisionModelTester", } self.assertEqual(get_test_info.to_json(snake_case ) , snake_case ) self.assertEqual(get_test_info.to_json(snake_case ) , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = get_model_to_test_mapping(snake_case ) UpperCAmelCase : Optional[int] = get_model_to_test_mapping(snake_case ) UpperCAmelCase : int = { "BertForMaskedLM": ["BertModelTest"], "BertForMultipleChoice": ["BertModelTest"], "BertForNextSentencePrediction": ["BertModelTest"], "BertForPreTraining": ["BertModelTest"], "BertForQuestionAnswering": ["BertModelTest"], "BertForSequenceClassification": ["BertModelTest"], "BertForTokenClassification": ["BertModelTest"], "BertLMHeadModel": ["BertModelTest"], "BertModel": ["BertModelTest"], } UpperCAmelCase : int = { "BlipForConditionalGeneration": ["BlipTextImageModelTest"], "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"], "BlipForQuestionAnswering": ["BlipVQAModelTest"], "BlipModel": ["BlipModelTest"], "BlipTextModel": ["BlipTextModelTest"], "BlipVisionModel": ["BlipVisionModelTest"], } self.assertEqual(get_test_info.to_json(snake_case ) , snake_case ) self.assertEqual(get_test_info.to_json(snake_case ) , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = get_model_to_tester_mapping(snake_case ) UpperCAmelCase : Dict = get_model_to_tester_mapping(snake_case ) UpperCAmelCase : Tuple = { "BertForMaskedLM": ["BertModelTester"], "BertForMultipleChoice": ["BertModelTester"], "BertForNextSentencePrediction": ["BertModelTester"], "BertForPreTraining": ["BertModelTester"], "BertForQuestionAnswering": ["BertModelTester"], "BertForSequenceClassification": ["BertModelTester"], "BertForTokenClassification": ["BertModelTester"], "BertLMHeadModel": ["BertModelTester"], "BertModel": ["BertModelTester"], } UpperCAmelCase : Any = { "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"], "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"], "BlipForQuestionAnswering": ["BlipVQAModelTester"], "BlipModel": ["BlipModelTester"], "BlipTextModel": ["BlipTextModelTester"], "BlipVisionModel": ["BlipVisionModelTester"], } self.assertEqual(get_test_info.to_json(snake_case ) , snake_case ) self.assertEqual(get_test_info.to_json(snake_case ) , snake_case )
311
'''simple docstring''' import jax.numpy as jnp from ...utils import logging from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel from .configuration_mta import MTaConfig a : Optional[Any] = logging.get_logger(__name__) a : Tuple = "T5Config" def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Any = jnp.zeros_like(__magic_name__ ) UpperCAmelCase : Optional[int] = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] ) UpperCAmelCase : str = shifted_input_ids.at[:, 0].set(__magic_name__ ) UpperCAmelCase : Any = jnp.where(shifted_input_ids == -100 , __magic_name__ , __magic_name__ ) return shifted_input_ids class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : int = "mt5" SCREAMING_SNAKE_CASE__ : Dict = MTaConfig class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : int = "mt5" SCREAMING_SNAKE_CASE__ : str = MTaConfig class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = "mt5" SCREAMING_SNAKE_CASE__ : str = MTaConfig
311
1
'''simple docstring''' import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin a : str = get_tests_dir("fixtures/spiece.model") @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( lowercase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = DebertaVaTokenizer SCREAMING_SNAKE_CASE__ : Optional[int] = DebertaVaTokenizerFast SCREAMING_SNAKE_CASE__ : str = True SCREAMING_SNAKE_CASE__ : List[Any] = True def A_ ( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase : Dict = DebertaVaTokenizer(snake_case , unk_token="<unk>" ) tokenizer.save_pretrained(self.tmpdirname ) def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : List[str] = "this is a test" UpperCAmelCase : str = "this is a test" return input_text, output_text def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = "<pad>" UpperCAmelCase : str = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case ) , snake_case ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case ) , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<pad>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "[PAD]" ) self.assertEqual(len(snake_case ) , 3_0_0_0_1 ) def A_ ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0 ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = " \tHeLLo!how \n Are yoU? " UpperCAmelCase : Dict = ["▁hello", "!", "how", "▁are", "▁you", "?"] # fmt: on UpperCAmelCase : Dict = DebertaVaTokenizer(snake_case , do_lower_case=snake_case ) UpperCAmelCase : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : str = DebertaVaTokenizerFast(snake_case , do_lower_case=snake_case ) UpperCAmelCase : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def A_ ( self ): '''simple docstring''' pass @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def A_ ( self ): '''simple docstring''' pass def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[Any] = "I was born in 92000, and this is falsé." UpperCAmelCase : Dict = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on UpperCAmelCase : List[str] = DebertaVaTokenizer(snake_case , split_by_punct=snake_case ) UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : List[str] = DebertaVaTokenizerFast(snake_case , split_by_punct=snake_case ) UpperCAmelCase : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[int] = "I was born in 92000, and this is falsé." UpperCAmelCase : Optional[Any] = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on UpperCAmelCase : int = DebertaVaTokenizer(snake_case , do_lower_case=snake_case , split_by_punct=snake_case ) UpperCAmelCase : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : Dict = DebertaVaTokenizerFast(snake_case , do_lower_case=snake_case , split_by_punct=snake_case ) UpperCAmelCase : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = "I was born in 92000, and this is falsé." UpperCAmelCase : Optional[Any] = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on UpperCAmelCase : int = DebertaVaTokenizer(snake_case , do_lower_case=snake_case , split_by_punct=snake_case ) UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : int = DebertaVaTokenizerFast(snake_case , do_lower_case=snake_case , split_by_punct=snake_case ) UpperCAmelCase : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = "I was born in 92000, and this is falsé." UpperCAmelCase : Union[str, Any] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on UpperCAmelCase : Optional[Any] = DebertaVaTokenizer(snake_case , do_lower_case=snake_case , split_by_punct=snake_case ) UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : List[str] = DebertaVaTokenizerFast(snake_case , do_lower_case=snake_case , split_by_punct=snake_case ) UpperCAmelCase : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = " \tHeLLo!how \n Are yoU? " UpperCAmelCase : Any = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"] # fmt: on UpperCAmelCase : Tuple = DebertaVaTokenizer(snake_case , do_lower_case=snake_case , split_by_punct=snake_case ) UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : List[str] = DebertaVaTokenizerFast(snake_case , do_lower_case=snake_case , split_by_punct=snake_case ) UpperCAmelCase : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = self.get_tokenizer() UpperCAmelCase : Union[str, Any] = self.get_rust_tokenizer() UpperCAmelCase : Dict = "I was born in 92000, and this is falsé." UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) UpperCAmelCase : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : Tuple = tokenizer.encode(snake_case , add_special_tokens=snake_case ) UpperCAmelCase : Union[str, Any] = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : int = self.get_rust_tokenizer() UpperCAmelCase : Tuple = tokenizer.encode(snake_case ) UpperCAmelCase : List[Any] = rust_tokenizer.encode(snake_case ) self.assertListEqual(snake_case , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[Any] = "This is a test" UpperCAmelCase : Tuple = [1_3, 1, 4_3_9_8, 2_5, 2_1, 1_2_8_9] UpperCAmelCase : Any = ["▁", "T", "his", "▁is", "▁a", "▁test"] UpperCAmelCase : Union[str, Any] = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"] UpperCAmelCase : Dict = DebertaVaTokenizer(snake_case , keep_accents=snake_case ) UpperCAmelCase : List[str] = DebertaVaTokenizerFast(snake_case , keep_accents=snake_case ) UpperCAmelCase : str = tokenizer.encode(snake_case , add_special_tokens=snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : Optional[Any] = tokenizer.tokenize(snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : Tuple = tokenizer.convert_ids_to_tokens(snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : Dict = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : Optional[int] = rust_tokenizer.tokenize(snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : Dict = rust_tokenizer.convert_ids_to_tokens(snake_case ) self.assertListEqual(snake_case , snake_case ) # fmt: off UpperCAmelCase : Tuple = "I was born in 92000, and this is falsé." UpperCAmelCase : List[str] = [1_3, 1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9] UpperCAmelCase : List[Any] = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ] UpperCAmelCase : Union[str, Any] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on UpperCAmelCase : List[str] = tokenizer.encode(snake_case , add_special_tokens=snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : int = tokenizer.tokenize(snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : Tuple = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : List[str] = rust_tokenizer.tokenize(snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : str = rust_tokenizer.convert_ids_to_tokens(snake_case ) self.assertListEqual(snake_case , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = DebertaVaTokenizer(snake_case ) UpperCAmelCase : str = tokenizer.encode("sequence builders" ) UpperCAmelCase : List[str] = tokenizer.encode("multi-sequence build" ) UpperCAmelCase : Optional[int] = tokenizer.build_inputs_with_special_tokens(snake_case ) UpperCAmelCase : Dict = tokenizer.build_inputs_with_special_tokens(snake_case , snake_case ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , snake_case ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , snake_case , ) @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = {"input_ids": [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=snake_case , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
311
'''simple docstring''' from jiwer import compute_measures import datasets a : List[Any] = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n" a : str = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n" a : Union[str, Any] = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase__ ( datasets.Metric ): """simple docstring""" def A_ ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[ "https://en.wikipedia.org/wiki/Word_error_rate", ] , ) def A_ ( self , snake_case=None , snake_case=None , snake_case=False ): '''simple docstring''' if concatenate_texts: return compute_measures(snake_case , snake_case )["wer"] else: UpperCAmelCase : Dict = 0 UpperCAmelCase : Optional[Any] = 0 for prediction, reference in zip(snake_case , snake_case ): UpperCAmelCase : Tuple = compute_measures(snake_case , snake_case ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
311
1
'''simple docstring''' from argparse import ArgumentParser from .add_new_model import AddNewModelCommand from .add_new_model_like import AddNewModelLikeCommand from .convert import ConvertCommand from .download import DownloadCommand from .env import EnvironmentCommand from .lfs import LfsCommands from .pt_to_tf import PTtoTFCommand from .run import RunCommand from .serving import ServeCommand from .user import UserCommands def lowercase ( ): '''simple docstring''' UpperCAmelCase : Dict = ArgumentParser("Transformers CLI tool" , usage="transformers-cli <command> [<args>]" ) UpperCAmelCase : Union[str, Any] = parser.add_subparsers(help="transformers-cli command helpers" ) # Register commands ConvertCommand.register_subcommand(__magic_name__ ) DownloadCommand.register_subcommand(__magic_name__ ) EnvironmentCommand.register_subcommand(__magic_name__ ) RunCommand.register_subcommand(__magic_name__ ) ServeCommand.register_subcommand(__magic_name__ ) UserCommands.register_subcommand(__magic_name__ ) AddNewModelCommand.register_subcommand(__magic_name__ ) AddNewModelLikeCommand.register_subcommand(__magic_name__ ) LfsCommands.register_subcommand(__magic_name__ ) PTtoTFCommand.register_subcommand(__magic_name__ ) # Let's go UpperCAmelCase : List[Any] = parser.parse_args() if not hasattr(__magic_name__ , "func" ): parser.print_help() exit(1 ) # Run UpperCAmelCase : List[str] = args.func(__magic_name__ ) service.run() if __name__ == "__main__": main()
311
'''simple docstring''' from functools import lru_cache def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = 2 UpperCAmelCase : str = set() while i * i <= n: if n % i: i += 1 else: n //= i factors.add(__magic_name__ ) if n > 1: factors.add(__magic_name__ ) return factors @lru_cache def lowercase ( __magic_name__ ): '''simple docstring''' return len(unique_prime_factors(__magic_name__ ) ) def lowercase ( __magic_name__ ): '''simple docstring''' return len(set(__magic_name__ ) ) in (0, 1) def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = 2 while True: # Increment each value of a generated range UpperCAmelCase : Any = [base + i for i in range(__magic_name__ )] # Run elements through out unique_prime_factors function # Append our target number to the end. UpperCAmelCase : Dict = [upf_len(__magic_name__ ) for x in group] checker.append(__magic_name__ ) # If all numbers in the list are equal, return the group variable. if equality(__magic_name__ ): return group # Increment our base variable by 1 base += 1 def lowercase ( __magic_name__ = 4 ): '''simple docstring''' UpperCAmelCase : int = run(__magic_name__ ) return results[0] if len(__magic_name__ ) else None if __name__ == "__main__": print(solution())
311
1
'''simple docstring''' import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 6_50, "eval_accuracy": 0.6, "eval_loss": 0.9}, }, { "framework": "tensorflow", "script": "run_tf.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 6_00, "eval_accuracy": 0.3, "eval_loss": 0.9}, }, ] ) class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self ): '''simple docstring''' if self.framework == "pytorch": subprocess.run( f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding="utf-8" , check=snake_case , ) assert hasattr(self , "env" ) def A_ ( self , snake_case=1 ): '''simple docstring''' return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"{self.env.base_job_name}-single" , instance_count=snake_case , instance_type=self.instance_type , debugger_hook_config=snake_case , hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="py36" , ) def A_ ( self , snake_case ): '''simple docstring''' TrainingJobAnalytics(snake_case ).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv" ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = self.create_estimator() # run training estimator.fit() # result dataframe UpperCAmelCase : Tuple = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis UpperCAmelCase : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] ) UpperCAmelCase : Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping UpperCAmelCase : Dict = ( Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 9_9_9_9_9_9 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy ) assert all(t <= self.results["eval_loss"] for t in eval_loss ) # dump tests result into json file to share in PR with open(f"{estimator.latest_training_job.name}.json" , "w" ) as outfile: json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , snake_case )
311
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) a : Union[str, Any] = { "configuration_encodec": [ "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP", "EncodecConfig", ], "feature_extraction_encodec": ["EncodecFeatureExtractor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Optional[int] = [ "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST", "EncodecModel", "EncodecPreTrainedModel", ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys a : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
311
1
'''simple docstring''' import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Tuple = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "decoder.output_projection.weight", "_float_tensor", "encoder.embed_positions._float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(__magic_name__ , __magic_name__ ) def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase : List[str] = emb.weight.shape UpperCAmelCase : str = nn.Linear(__magic_name__ , __magic_name__ , bias=__magic_name__ ) UpperCAmelCase : int = emb.weight.data return lin_layer def lowercase ( __magic_name__ , __magic_name__=None ): '''simple docstring''' UpperCAmelCase : Tuple = {} for old_key in state_dict.keys(): UpperCAmelCase : Tuple = old_key if "moe_layer.experts." in key: if expert_idx is not None: UpperCAmelCase : Union[str, Any] = key.replace("moe_layer.experts.0" , F"ffn.experts.expert_{expert_idx}" ) else: UpperCAmelCase : List[str] = key.replace("moe_layer.experts." , "ffn.experts.expert_" ) if "gate" in key: UpperCAmelCase : Any = key.replace(".moe_layer.gate.wg" , ".ffn.router.classifier" ) if "fc2" and "experts" not in key: UpperCAmelCase : List[Any] = key.replace(".fc2." , ".ffn.fc2." ) if "fc1" and "experts" not in key: UpperCAmelCase : Optional[Any] = key.replace(".fc1." , ".ffn.fc1." ) if ".encoder_attn." in key: UpperCAmelCase : Union[str, Any] = key.replace(".encoder_attn." , ".cross_attention." ) if "encoder_attn_layer_norm" in key: UpperCAmelCase : Tuple = key.replace("encoder_attn_layer_norm" , "cross_attention_layer_norm" ) if "final_layer_norm" in key: UpperCAmelCase : int = key.replace("final_layer_norm" , "ff_layer_norm" ) UpperCAmelCase : Optional[Any] = state_dict[old_key] return new_dict def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = WEIGHTS_NAME ): '''simple docstring''' UpperCAmelCase : Any = [] UpperCAmelCase : Any = 0 os.makedirs(__magic_name__ , exist_ok=__magic_name__ ) for expert in range(__magic_name__ ): UpperCAmelCase : List[str] = switch_checkpoint_path + F"-rank-{expert}.pt" if os.path.isfile(__magic_name__ ): UpperCAmelCase : Dict = torch.load(__magic_name__ )["model"] remove_ignore_keys_(__magic_name__ ) UpperCAmelCase : List[str] = rename_fairseq_keys(__magic_name__ , __magic_name__ ) UpperCAmelCase : Optional[int] = os.path.join( __magic_name__ , weights_name.replace(".bin" , F"-{len(__magic_name__ )+1:05d}-of-???.bin" ) ) torch.save(__magic_name__ , __magic_name__ ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(__magic_name__ )[0]].dtype ) # Add the last block UpperCAmelCase : Optional[int] = os.path.join(__magic_name__ , weights_name.replace(".bin" , F"-{len(__magic_name__ )+1:05d}-of-???.bin" ) ) UpperCAmelCase : Optional[Any] = torch.load(switch_checkpoint_path + "-shared.pt" )["model"] remove_ignore_keys_(__magic_name__ ) UpperCAmelCase : Optional[int] = rename_fairseq_keys(__magic_name__ , __magic_name__ ) UpperCAmelCase : List[Any] = shared_weights["decoder.embed_tokens.weight"] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if len(__magic_name__ ) == 1: UpperCAmelCase : List[str] = os.path.join(__magic_name__ , __magic_name__ ) torch.save(__magic_name__ , __magic_name__ ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(__magic_name__ , __magic_name__ ) # Otherwise, let's build the index UpperCAmelCase : str = {} for idx, shard in enumerate(__magic_name__ ): UpperCAmelCase : List[Any] = weights_name.replace(".bin" , F"-{idx+1:05d}-of-{len(__magic_name__ ):05d}.bin" ) UpperCAmelCase : Dict = os.path.join(__magic_name__ , weights_name.replace(".bin" , F"-{idx+1:05d}-of-???.bin" ) ) os.rename(__magic_name__ , os.path.join(__magic_name__ , __magic_name__ ) ) for key in shard: UpperCAmelCase : int = shard_file # Add the metadata UpperCAmelCase : List[Any] = {"total_size": total_size} UpperCAmelCase : Optional[int] = {"metadata": metadata, "weight_map": weight_map} with open(os.path.join(__magic_name__ , __magic_name__ ) , "w" , encoding="utf-8" ) as f: UpperCAmelCase : str = json.dumps(__magic_name__ , indent=2 , sort_keys=__magic_name__ ) + "\n" f.write(__magic_name__ ) return metadata, index if __name__ == "__main__": a : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( "--nllb_moe_checkpoint_path", default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000", type=str, required=False, help="Path to a directory containing a folder per layer. Follows the original Google format.", ) parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model") parser.add_argument( "--pytorch_dump_folder_path", default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b", type=str, required=False, help="Path to the output pytorch model.", ) a : Union[str, Any] = parser.parse_args() a , a : Tuple = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 1_28, args.dtype, ) a : List[str] = NllbMoeConfig.from_pretrained( "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_28 ) config.save_pretrained(args.pytorch_dump_folder_path) a : List[Any] = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print("Done") model.save_pretrained(args.pytorch_dump_folder_path)
311
'''simple docstring''' # Lint as: python3 import itertools import os import re a : Tuple = re.compile(R"([A-Z]+)([A-Z][a-z])") a : Union[str, Any] = re.compile(R"([a-z\d])([A-Z])") a : str = re.compile(R"(?<!_)_(?!_)") a : List[Any] = re.compile(R"(_{2,})") a : List[Any] = R"^\w+(\.\w+)*$" a : Dict = R"<>:/\|?*" def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = _uppercase_uppercase_re.sub(R"\1_\2" , __magic_name__ ) UpperCAmelCase : List[str] = _lowercase_uppercase_re.sub(R"\1_\2" , __magic_name__ ) return name.lower() def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Any = _single_underscore_re.split(__magic_name__ ) UpperCAmelCase : Union[str, Any] = [_multiple_underscores_re.split(__magic_name__ ) for n in name] return "".join(n.capitalize() for n in itertools.chain.from_iterable(__magic_name__ ) if n != "" ) def lowercase ( __magic_name__ ): '''simple docstring''' if os.path.basename(__magic_name__ ) != name: raise ValueError(F"Should be a dataset name, not a path: {name}" ) return camelcase_to_snakecase(__magic_name__ ) def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' if os.path.basename(__magic_name__ ) != name: raise ValueError(F"Should be a dataset name, not a path: {name}" ) if not re.match(_split_re , __magic_name__ ): raise ValueError(F"Split name should match '{_split_re}'' but got '{split}'." ) return F"{filename_prefix_for_name(__magic_name__ )}-{split}" def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None ): '''simple docstring''' UpperCAmelCase : List[str] = filename_prefix_for_split(__magic_name__ , __magic_name__ ) if filetype_suffix: prefix += F".{filetype_suffix}" UpperCAmelCase : int = os.path.join(__magic_name__ , __magic_name__ ) return F"{filepath}*" def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None ): '''simple docstring''' UpperCAmelCase : List[str] = filename_prefix_for_split(__magic_name__ , __magic_name__ ) UpperCAmelCase : int = os.path.join(__magic_name__ , __magic_name__ ) if shard_lengths: UpperCAmelCase : Tuple = len(__magic_name__ ) UpperCAmelCase : Optional[int] = [F"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(__magic_name__ )] if filetype_suffix: UpperCAmelCase : Optional[int] = [filename + F".{filetype_suffix}" for filename in filenames] return filenames else: UpperCAmelCase : int = prefix if filetype_suffix: filename += F".{filetype_suffix}" return [filename]
311
1
'''simple docstring''' import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[int] = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split() UpperCAmelCase : Dict = dict(zip(snake_case , range(len(snake_case ) ) ) ) UpperCAmelCase : Dict = { "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", } UpperCAmelCase : Any = { "feature_size": 1, "padding_value": 0.0, "sampling_rate": 1_6_0_0_0, "return_attention_mask": False, "do_normalize": True, } UpperCAmelCase : str = tempfile.mkdtemp() UpperCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase : List[Any] = os.path.join(self.tmpdirname , snake_case ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(snake_case ) + "\n" ) with open(self.feature_extraction_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(snake_case ) + "\n" ) # load decoder from hub UpperCAmelCase : Tuple = "hf-internal-testing/ngram-beam-search-decoder" def A_ ( self , **snake_case ): '''simple docstring''' UpperCAmelCase : str = self.add_kwargs_tokens_map.copy() kwargs.update(snake_case ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **snake_case ) def A_ ( self , **snake_case ): '''simple docstring''' return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **snake_case ) def A_ ( self , **snake_case ): '''simple docstring''' return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **snake_case ) def A_ ( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[Any] = self.get_tokenizer() UpperCAmelCase : Optional[int] = self.get_feature_extractor() UpperCAmelCase : Dict = self.get_decoder() UpperCAmelCase : Dict = WavaVecaProcessorWithLM(tokenizer=snake_case , feature_extractor=snake_case , decoder=snake_case ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase : Any = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , snake_case ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , snake_case ) # decoder self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , ) self.assertIsInstance(processor.decoder , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match UpperCAmelCase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha , 5.0 ) self.assertEqual(processor.language_model.beta , 3.0 ) self.assertEqual(processor.language_model.score_boundary , -7.0 ) self.assertEqual(processor.language_model.unk_score_offset , 3 ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[Any] = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(["xx"] ) with self.assertRaisesRegex(snake_case , "include" ): WavaVecaProcessorWithLM( tokenizer=snake_case , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[int] = self.get_feature_extractor() UpperCAmelCase : Tuple = self.get_tokenizer() UpperCAmelCase : Tuple = self.get_decoder() UpperCAmelCase : Dict = WavaVecaProcessorWithLM(tokenizer=snake_case , feature_extractor=snake_case , decoder=snake_case ) UpperCAmelCase : int = floats_list((3, 1_0_0_0) ) UpperCAmelCase : Dict = feature_extractor(snake_case , return_tensors="np" ) UpperCAmelCase : Optional[int] = processor(snake_case , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[int] = self.get_feature_extractor() UpperCAmelCase : Optional[Any] = self.get_tokenizer() UpperCAmelCase : List[Any] = self.get_decoder() UpperCAmelCase : Any = WavaVecaProcessorWithLM(tokenizer=snake_case , feature_extractor=snake_case , decoder=snake_case ) UpperCAmelCase : Optional[Any] = "This is a test string" UpperCAmelCase : List[str] = processor(text=snake_case ) UpperCAmelCase : List[Any] = tokenizer(snake_case ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def A_ ( self , snake_case=(2, 1_0, 1_6) , snake_case=7_7 ): '''simple docstring''' np.random.seed(snake_case ) return np.random.rand(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[Any] = self.get_feature_extractor() UpperCAmelCase : int = self.get_tokenizer() UpperCAmelCase : List[Any] = self.get_decoder() UpperCAmelCase : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=snake_case , feature_extractor=snake_case , decoder=snake_case ) UpperCAmelCase : Optional[Any] = self._get_dummy_logits(shape=(1_0, 1_6) , seed=1_3 ) UpperCAmelCase : str = processor.decode(snake_case ) UpperCAmelCase : Optional[int] = decoder.decode_beams(snake_case )[0] self.assertEqual(decoded_decoder[0] , decoded_processor.text ) self.assertEqual("</s> <s> </s>" , decoded_processor.text ) self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score ) @parameterized.expand([[None], ["fork"], ["spawn"]] ) def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : Optional[int] = self.get_feature_extractor() UpperCAmelCase : Dict = self.get_tokenizer() UpperCAmelCase : Optional[Any] = self.get_decoder() UpperCAmelCase : Optional[int] = WavaVecaProcessorWithLM(tokenizer=snake_case , feature_extractor=snake_case , decoder=snake_case ) UpperCAmelCase : Dict = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) if pool_context is None: UpperCAmelCase : int = processor.batch_decode(snake_case ) else: with get_context(snake_case ).Pool() as pool: UpperCAmelCase : Tuple = processor.batch_decode(snake_case , snake_case ) UpperCAmelCase : Dict = list(snake_case ) with get_context("fork" ).Pool() as p: UpperCAmelCase : Optional[int] = decoder.decode_beams_batch(snake_case , snake_case ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(snake_case , decoded_processor.text ) self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"] , decoded_processor.text ) self.assertListEqual(snake_case , decoded_processor.logit_score ) self.assertListEqual(snake_case , decoded_processor.lm_score ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[int] = self.get_feature_extractor() UpperCAmelCase : Dict = self.get_tokenizer() UpperCAmelCase : str = self.get_decoder() UpperCAmelCase : Optional[int] = WavaVecaProcessorWithLM(tokenizer=snake_case , feature_extractor=snake_case , decoder=snake_case ) UpperCAmelCase : Union[str, Any] = self._get_dummy_logits() UpperCAmelCase : str = 1_5 UpperCAmelCase : int = -20.0 UpperCAmelCase : Dict = -4.0 UpperCAmelCase : Any = processor.batch_decode( snake_case , beam_width=snake_case , beam_prune_logp=snake_case , token_min_logp=snake_case , ) UpperCAmelCase : int = decoded_processor_out.text UpperCAmelCase : Any = list(snake_case ) with get_context("fork" ).Pool() as pool: UpperCAmelCase : Optional[int] = decoder.decode_beams_batch( snake_case , snake_case , beam_width=snake_case , beam_prune_logp=snake_case , token_min_logp=snake_case , ) UpperCAmelCase : int = [d[0][0] for d in decoded_decoder_out] UpperCAmelCase : int = [d[0][2] for d in decoded_decoder_out] UpperCAmelCase : Any = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(snake_case , snake_case ) self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"] , snake_case ) self.assertTrue(np.array_equal(snake_case , decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-20.054, -18.447] , snake_case , atol=1e-3 ) ) self.assertTrue(np.array_equal(snake_case , decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-15.554, -13.9474] , snake_case , atol=1e-3 ) ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = self.get_feature_extractor() UpperCAmelCase : Optional[int] = self.get_tokenizer() UpperCAmelCase : Dict = self.get_decoder() UpperCAmelCase : Tuple = WavaVecaProcessorWithLM(tokenizer=snake_case , feature_extractor=snake_case , decoder=snake_case ) UpperCAmelCase : str = self._get_dummy_logits() UpperCAmelCase : int = 2.0 UpperCAmelCase : int = 5.0 UpperCAmelCase : List[str] = -20.0 UpperCAmelCase : Union[str, Any] = True UpperCAmelCase : str = processor.batch_decode( snake_case , alpha=snake_case , beta=snake_case , unk_score_offset=snake_case , lm_score_boundary=snake_case , ) UpperCAmelCase : List[Any] = decoded_processor_out.text UpperCAmelCase : Tuple = list(snake_case ) decoder.reset_params( alpha=snake_case , beta=snake_case , unk_score_offset=snake_case , lm_score_boundary=snake_case , ) with get_context("fork" ).Pool() as pool: UpperCAmelCase : int = decoder.decode_beams_batch( snake_case , snake_case , ) UpperCAmelCase : List[Any] = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(snake_case , snake_case ) self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"] , snake_case ) UpperCAmelCase : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha , 2.0 ) self.assertEqual(lm_model.beta , 5.0 ) self.assertEqual(lm_model.unk_score_offset , -20.0 ) self.assertEqual(lm_model.score_boundary , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" ) UpperCAmelCase : Tuple = processor.decoder.model_container[processor.decoder._model_key] UpperCAmelCase : Dict = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute() UpperCAmelCase : Union[str, Any] = os.listdir(snake_case ) UpperCAmelCase : Optional[Any] = ["alphabet.json", "language_model"] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) self.assertListEqual(snake_case , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = snapshot_download("hf-internal-testing/processor_with_lm" ) UpperCAmelCase : Dict = WavaVecaProcessorWithLM.from_pretrained(snake_case ) UpperCAmelCase : Tuple = processor.decoder.model_container[processor.decoder._model_key] UpperCAmelCase : List[str] = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute() UpperCAmelCase : Dict = os.listdir(snake_case ) UpperCAmelCase : List[str] = os.listdir(snake_case ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(snake_case , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[Any] = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" ) UpperCAmelCase : List[str] = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm" ) UpperCAmelCase : int = floats_list((3, 1_0_0_0) ) UpperCAmelCase : List[str] = processor_wavaveca(snake_case , return_tensors="np" ) UpperCAmelCase : str = processor_auto(snake_case , return_tensors="np" ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 ) UpperCAmelCase : Optional[int] = self._get_dummy_logits() UpperCAmelCase : Optional[int] = processor_wavaveca.batch_decode(snake_case ) UpperCAmelCase : Union[str, Any] = processor_auto.batch_decode(snake_case ) self.assertListEqual(decoded_wavaveca.text , decoded_auto.text ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = self.get_feature_extractor() UpperCAmelCase : int = self.get_tokenizer() UpperCAmelCase : Any = self.get_decoder() UpperCAmelCase : List[Any] = WavaVecaProcessorWithLM(tokenizer=snake_case , feature_extractor=snake_case , decoder=snake_case ) self.assertListEqual( processor.model_input_names , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , ) @staticmethod def A_ ( snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Optional[int] = [d[key] for d in offsets] return retrieved_list def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" ) UpperCAmelCase : List[Any] = self._get_dummy_logits()[0] UpperCAmelCase : Any = processor.decode(snake_case , output_word_offsets=snake_case ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue("text" in outputs ) self.assertTrue("word_offsets" in outputs ) self.assertTrue(isinstance(snake_case , snake_case ) ) self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"] , "word" ) ) , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "word" ) , ["<s>", "<s>", "</s>"] ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "start_offset" ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "end_offset" ) , [1, 3, 5] ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" ) UpperCAmelCase : Optional[Any] = self._get_dummy_logits() UpperCAmelCase : Union[str, Any] = processor.batch_decode(snake_case , output_word_offsets=snake_case ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue("text" in outputs ) self.assertTrue("word_offsets" in outputs ) self.assertTrue(isinstance(snake_case , snake_case ) ) self.assertListEqual( [" ".join(self.get_from_offsets(snake_case , "word" ) ) for o in outputs["word_offsets"]] , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "word" ) , ["<s>", "<s>", "</s>"] ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "start_offset" ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "end_offset" ) , [1, 3, 5] ) @slow @require_torch @require_torchaudio def A_ ( self ): '''simple docstring''' import torch UpperCAmelCase : Optional[int] = load_dataset("common_voice" , "en" , split="train" , streaming=snake_case ) UpperCAmelCase : str = ds.cast_column("audio" , datasets.Audio(sampling_rate=1_6_0_0_0 ) ) UpperCAmelCase : Tuple = iter(snake_case ) UpperCAmelCase : str = next(snake_case ) UpperCAmelCase : List[Any] = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" ) UpperCAmelCase : Optional[int] = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train UpperCAmelCase : List[str] = processor(sample["audio"]["array"] , return_tensors="pt" ).input_values with torch.no_grad(): UpperCAmelCase : Optional[Any] = model(snake_case ).logits.cpu().numpy() UpperCAmelCase : Optional[int] = processor.decode(logits[0] , output_word_offsets=snake_case ) UpperCAmelCase : Dict = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate UpperCAmelCase : List[Any] = [ { "start_time": d["start_offset"] * time_offset, "end_time": d["end_offset"] * time_offset, "word": d["word"], } for d in output["word_offsets"] ] UpperCAmelCase : int = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL" # output words self.assertEqual(" ".join(self.get_from_offsets(snake_case , "word" ) ) , snake_case ) self.assertEqual(" ".join(self.get_from_offsets(snake_case , "word" ) ) , output.text ) # output times UpperCAmelCase : Dict = torch.tensor(self.get_from_offsets(snake_case , "start_time" ) ) UpperCAmelCase : Dict = torch.tensor(self.get_from_offsets(snake_case , "end_time" ) ) # fmt: off UpperCAmelCase : str = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] ) UpperCAmelCase : Optional[int] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] ) # fmt: on self.assertTrue(torch.allclose(snake_case , snake_case , atol=0.01 ) ) self.assertTrue(torch.allclose(snake_case , snake_case , atol=0.01 ) )
311
'''simple docstring''' from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) a : Optional[int] = _symbol_database.Default() a : Any = _descriptor_pool.Default().AddSerializedFile( B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03" ) a : Tuple = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals) if _descriptor._USE_C_DESCRIPTORS is False: a : str = None a : Optional[Any] = B"H\003" # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" a : str = 45 a : Any = 15_81 a : List[Any] = 15_17 a : Union[str, Any] = 15_70 a : Optional[Any] = 15_84 a : List[str] = 17_93 a : Optional[Any] = 17_95 a : Tuple = 19_16 a : Optional[Any] = 18_64 a : int = 19_05 a : Optional[Any] = 19_19 a : Union[str, Any] = 24_29 a : List[Any] = 22_08 a : Dict = 24_18 a : Optional[int] = 23_23 a : str = 24_07 # @@protoc_insertion_point(module_scope)
311
1
'''simple docstring''' import argparse import logging import sys from unittest.mock import patch import run_glue_deebert from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow logging.basicConfig(level=logging.DEBUG) a : Tuple = logging.getLogger() def lowercase ( ): '''simple docstring''' UpperCAmelCase : List[Any] = argparse.ArgumentParser() parser.add_argument("-f" ) UpperCAmelCase : List[str] = parser.parse_args() return args.f class UpperCamelCase__ ( lowercase__ ): """simple docstring""" def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[Any] = logging.StreamHandler(sys.stdout ) logger.addHandler(snake_case ) def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : Dict = get_gpu_count() if n_gpu > 1: pass # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560 # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py" # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split() # cmd = [sys.executable] + distributed_args + args # execute_subprocess_async(cmd, env=self.get_env()) # XXX: test the results - need to save them first into .json file else: args.insert(0 , "run_glue_deebert.py" ) with patch.object(snake_case , "argv" , snake_case ): UpperCAmelCase : int = run_glue_deebert.main() for value in result.values(): self.assertGreaterEqual(snake_case , 0.666 ) @slow @require_torch_non_multi_gpu def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split() self.run_and_check(snake_case ) UpperCAmelCase : Optional[Any] = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split() self.run_and_check(snake_case ) UpperCAmelCase : int = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split() self.run_and_check(snake_case )
311
'''simple docstring''' import argparse import copy def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[str] = {} with open(__magic_name__ ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: UpperCAmelCase : List[Any] = [] _list.append([line.split()[1], line.split()[2]] ) UpperCAmelCase : Tuple = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: UpperCAmelCase : Any = [] _list.append([line.split()[0], line.split()[2]] ) UpperCAmelCase : int = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' with open(__magic_name__ ) as f: UpperCAmelCase : List[str] = f.read(1 ) UpperCAmelCase : List[Any] = start_node UpperCAmelCase : Union[str, Any] = [] UpperCAmelCase : Any = start_node UpperCAmelCase : Optional[Any] = 0 while visiting not in first_solution: UpperCAmelCase : Optional[Any] = 1_0000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(__magic_name__ ) and k[0] not in first_solution: UpperCAmelCase : Tuple = k[1] UpperCAmelCase : Dict = k[0] first_solution.append(__magic_name__ ) UpperCAmelCase : int = distance_of_first_solution + int(__magic_name__ ) UpperCAmelCase : str = best_node first_solution.append(__magic_name__ ) UpperCAmelCase : int = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 UpperCAmelCase : str = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 1_0000 ) return first_solution, distance_of_first_solution def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Optional[Any] = [] for n in solution[1:-1]: UpperCAmelCase : Any = solution.index(__magic_name__ ) for kn in solution[1:-1]: UpperCAmelCase : Dict = solution.index(__magic_name__ ) if n == kn: continue UpperCAmelCase : Tuple = copy.deepcopy(__magic_name__ ) UpperCAmelCase : Optional[int] = kn UpperCAmelCase : List[str] = n UpperCAmelCase : str = 0 for k in _tmp[:-1]: UpperCAmelCase : List[Any] = _tmp[_tmp.index(__magic_name__ ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: UpperCAmelCase : List[Any] = distance + int(i[1] ) _tmp.append(__magic_name__ ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) UpperCAmelCase : List[str] = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda __magic_name__ : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[Any] = 1 UpperCAmelCase : List[str] = first_solution UpperCAmelCase : str = [] UpperCAmelCase : Union[str, Any] = distance_of_first_solution UpperCAmelCase : Union[str, Any] = solution while count <= iters: UpperCAmelCase : int = find_neighborhood(__magic_name__ , __magic_name__ ) UpperCAmelCase : Any = 0 UpperCAmelCase : List[str] = neighborhood[index_of_best_solution] UpperCAmelCase : Dict = len(__magic_name__ ) - 1 UpperCAmelCase : Dict = False while not found: UpperCAmelCase : List[Any] = 0 while i < len(__magic_name__ ): if best_solution[i] != solution[i]: UpperCAmelCase : int = best_solution[i] UpperCAmelCase : Optional[int] = solution[i] break UpperCAmelCase : List[str] = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) UpperCAmelCase : List[str] = True UpperCAmelCase : List[Any] = best_solution[:-1] UpperCAmelCase : str = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: UpperCAmelCase : Union[str, Any] = cost UpperCAmelCase : Tuple = solution else: UpperCAmelCase : Optional[Any] = index_of_best_solution + 1 UpperCAmelCase : str = neighborhood[index_of_best_solution] if len(__magic_name__ ) >= size: tabu_list.pop(0 ) UpperCAmelCase : int = count + 1 return best_solution_ever, best_cost def lowercase ( __magic_name__=None ): '''simple docstring''' UpperCAmelCase : Dict = generate_neighbours(args.File ) UpperCAmelCase , UpperCAmelCase : Any = generate_first_solution( args.File , __magic_name__ ) UpperCAmelCase , UpperCAmelCase : Any = tabu_search( __magic_name__ , __magic_name__ , __magic_name__ , args.Iterations , args.Size , ) print(F"Best solution: {best_sol}, with total distance: {best_cost}." ) if __name__ == "__main__": a : Union[str, Any] = argparse.ArgumentParser(description="Tabu Search") parser.add_argument( "-f", "--File", type=str, help="Path to the file containing the data", required=True, ) parser.add_argument( "-i", "--Iterations", type=int, help="How many iterations the algorithm should perform", required=True, ) parser.add_argument( "-s", "--Size", type=int, help="Size of the tabu list", required=True ) # Pass the arguments to main method main(parser.parse_args())
311
1
'''simple docstring''' import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case , snake_case=2 , snake_case=8 , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=9_9 , snake_case=1_6 , snake_case=5 , snake_case=2 , snake_case=3_6 , snake_case="gelu" , snake_case=0.0 , snake_case=0.0 , snake_case=5_1_2 , snake_case=1_6 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , ): '''simple docstring''' UpperCAmelCase : Dict = parent UpperCAmelCase : int = batch_size UpperCAmelCase : int = seq_length UpperCAmelCase : str = is_training UpperCAmelCase : str = use_input_mask UpperCAmelCase : Optional[Any] = use_token_type_ids UpperCAmelCase : List[Any] = use_labels UpperCAmelCase : List[str] = vocab_size UpperCAmelCase : Any = hidden_size UpperCAmelCase : Optional[Any] = num_hidden_layers UpperCAmelCase : str = num_attention_heads UpperCAmelCase : Any = intermediate_size UpperCAmelCase : str = hidden_act UpperCAmelCase : str = hidden_dropout_prob UpperCAmelCase : str = attention_probs_dropout_prob UpperCAmelCase : Tuple = max_position_embeddings UpperCAmelCase : int = type_vocab_size UpperCAmelCase : List[str] = type_sequence_label_size UpperCAmelCase : Union[str, Any] = initializer_range UpperCAmelCase : str = num_labels UpperCAmelCase : Union[str, Any] = num_choices UpperCAmelCase : List[Any] = scope def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : List[Any] = None if self.use_input_mask: UpperCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase : Optional[Any] = None if self.use_token_type_ids: UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase : List[str] = None UpperCAmelCase : Optional[Any] = None UpperCAmelCase : Optional[Any] = None if self.use_labels: UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase : str = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase : Any = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A_ ( self ): '''simple docstring''' return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[int] = self.get_config() UpperCAmelCase : Dict = 3_0_0 return config def A_ ( self ): '''simple docstring''' ( ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ) : List[Any] = self.prepare_config_and_inputs() UpperCAmelCase : Tuple = True UpperCAmelCase : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Optional[int] = MraModel(config=snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : Any = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case ) UpperCAmelCase : Any = model(snake_case , token_type_ids=snake_case ) UpperCAmelCase : Optional[Any] = model(snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ): '''simple docstring''' UpperCAmelCase : Dict = True UpperCAmelCase : Any = MraModel(snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : Optional[Any] = model( snake_case , attention_mask=snake_case , token_type_ids=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , ) UpperCAmelCase : List[str] = model( snake_case , attention_mask=snake_case , token_type_ids=snake_case , encoder_hidden_states=snake_case , ) UpperCAmelCase : Optional[int] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Optional[int] = MraForMaskedLM(config=snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : Any = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Optional[Any] = MraForQuestionAnswering(config=snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : List[str] = model( snake_case , attention_mask=snake_case , token_type_ids=snake_case , start_positions=snake_case , end_positions=snake_case , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = self.num_labels UpperCAmelCase : Any = MraForSequenceClassification(snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : Dict = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : int = self.num_labels UpperCAmelCase : List[Any] = MraForTokenClassification(config=snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : List[str] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : int = self.num_choices UpperCAmelCase : Dict = MraForMultipleChoice(config=snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase : List[Any] = model( snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = self.prepare_config_and_inputs() ( ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ) : List[Any] = config_and_inputs UpperCAmelCase : int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class UpperCamelCase__ ( lowercase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ : Any = False SCREAMING_SNAKE_CASE__ : Any = False SCREAMING_SNAKE_CASE__ : Tuple = False SCREAMING_SNAKE_CASE__ : List[str] = False SCREAMING_SNAKE_CASE__ : Optional[int] = () def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[Any] = MraModelTester(self ) UpperCAmelCase : List[str] = ConfigTester(self , config_class=snake_case , hidden_size=3_7 ) def A_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase : Optional[int] = type self.model_tester.create_and_check_model(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*snake_case ) @slow def A_ ( self ): '''simple docstring''' for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : List[Any] = MraModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) @unittest.skip(reason="MRA does not output attentions" ) def A_ ( self ): '''simple docstring''' return @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = MraModel.from_pretrained("uw-madison/mra-base-512-4" ) UpperCAmelCase : List[Any] = torch.arange(2_5_6 ).unsqueeze(0 ) with torch.no_grad(): UpperCAmelCase : List[Any] = model(snake_case )[0] UpperCAmelCase : Dict = torch.Size((1, 2_5_6, 7_6_8) ) self.assertEqual(output.shape , snake_case ) UpperCAmelCase : int = torch.tensor( [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) ) @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[str] = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4" ) UpperCAmelCase : Any = torch.arange(2_5_6 ).unsqueeze(0 ) with torch.no_grad(): UpperCAmelCase : Tuple = model(snake_case )[0] UpperCAmelCase : Union[str, Any] = 5_0_2_6_5 UpperCAmelCase : List[Any] = torch.Size((1, 2_5_6, vocab_size) ) self.assertEqual(output.shape , snake_case ) UpperCAmelCase : Tuple = torch.tensor( [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) ) @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3" ) UpperCAmelCase : Optional[Any] = torch.arange(4_0_9_6 ).unsqueeze(0 ) with torch.no_grad(): UpperCAmelCase : List[Any] = model(snake_case )[0] UpperCAmelCase : Dict = 5_0_2_6_5 UpperCAmelCase : List[str] = torch.Size((1, 4_0_9_6, vocab_size) ) self.assertEqual(output.shape , snake_case ) UpperCAmelCase : Union[str, Any] = torch.tensor( [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) )
311
'''simple docstring''' from collections.abc import Generator from math import sin def lowercase ( __magic_name__ ): '''simple docstring''' if len(__magic_name__ ) != 32: raise ValueError("Input must be of length 32" ) UpperCAmelCase : Union[str, Any] = b"" for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def lowercase ( __magic_name__ ): '''simple docstring''' if i < 0: raise ValueError("Input must be non-negative" ) UpperCAmelCase : Dict = format(__magic_name__ , "08x" )[-8:] UpperCAmelCase : List[str] = b"" for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" ) return little_endian_hex def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : int = b"" for char in message: bit_string += format(__magic_name__ , "08b" ).encode("utf-8" ) UpperCAmelCase : List[Any] = format(len(__magic_name__ ) , "064b" ).encode("utf-8" ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(__magic_name__ ) % 512 != 448: bit_string += b"0" bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] ) return bit_string def lowercase ( __magic_name__ ): '''simple docstring''' if len(__magic_name__ ) % 512 != 0: raise ValueError("Input must have length that's a multiple of 512" ) for pos in range(0 , len(__magic_name__ ) , 512 ): UpperCAmelCase : Union[str, Any] = bit_string[pos : pos + 512] UpperCAmelCase : Tuple = [] for i in range(0 , 512 , 32 ): block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) ) yield block_words def lowercase ( __magic_name__ ): '''simple docstring''' if i < 0: raise ValueError("Input must be non-negative" ) UpperCAmelCase : Any = format(__magic_name__ , "032b" ) UpperCAmelCase : int = "" for c in i_str: new_str += "1" if c == "0" else "0" return int(__magic_name__ , 2 ) def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' return (a + b) % 2**32 def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' if i < 0: raise ValueError("Input must be non-negative" ) if shift < 0: raise ValueError("Shift must be non-negative" ) return ((i << shift) ^ (i >> (32 - shift))) % 2**32 def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = preprocess(__magic_name__ ) UpperCAmelCase : List[Any] = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )] # Starting states UpperCAmelCase : List[str] = 0X67452301 UpperCAmelCase : Tuple = 0XEFCDAB89 UpperCAmelCase : List[Any] = 0X98BADCFE UpperCAmelCase : List[str] = 0X10325476 UpperCAmelCase : Dict = [ 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(__magic_name__ ): UpperCAmelCase : Optional[Any] = aa UpperCAmelCase : List[Any] = ba UpperCAmelCase : Optional[Any] = ca UpperCAmelCase : Any = da # Hash current chunk for i in range(64 ): if i <= 15: # f = (b & c) | (not_32(b) & d) # Alternate definition for f UpperCAmelCase : Tuple = d ^ (b & (c ^ d)) UpperCAmelCase : List[str] = i elif i <= 31: # f = (d & b) | (not_32(d) & c) # Alternate definition for f UpperCAmelCase : int = c ^ (d & (b ^ c)) UpperCAmelCase : Tuple = (5 * i + 1) % 16 elif i <= 47: UpperCAmelCase : Any = b ^ c ^ d UpperCAmelCase : Union[str, Any] = (3 * i + 5) % 16 else: UpperCAmelCase : Dict = c ^ (b | not_aa(__magic_name__ )) UpperCAmelCase : Dict = (7 * i) % 16 UpperCAmelCase : List[str] = (f + a + added_consts[i] + block_words[g]) % 2**32 UpperCAmelCase : List[Any] = d UpperCAmelCase : Any = c UpperCAmelCase : Dict = b UpperCAmelCase : Union[str, Any] = sum_aa(__magic_name__ , left_rotate_aa(__magic_name__ , shift_amounts[i] ) ) # Add hashed chunk to running total UpperCAmelCase : List[str] = sum_aa(__magic_name__ , __magic_name__ ) UpperCAmelCase : Any = sum_aa(__magic_name__ , __magic_name__ ) UpperCAmelCase : List[Any] = sum_aa(__magic_name__ , __magic_name__ ) UpperCAmelCase : Optional[int] = sum_aa(__magic_name__ , __magic_name__ ) UpperCAmelCase : List[str] = reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ ) return digest if __name__ == "__main__": import doctest doctest.testmod()
311
1
'''simple docstring''' import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal a : Any = datasets.utils.logging.get_logger(__name__) a : Optional[Any] = ["names", "prefix"] a : Dict = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"] a : Optional[Any] = ["encoding_errors", "on_bad_lines"] a : List[Any] = ["date_format"] @dataclass class UpperCamelCase__ ( datasets.BuilderConfig ): """simple docstring""" SCREAMING_SNAKE_CASE__ : str = "," SCREAMING_SNAKE_CASE__ : Optional[str] = None SCREAMING_SNAKE_CASE__ : Optional[Union[int, List[int], str]] = "infer" SCREAMING_SNAKE_CASE__ : Optional[List[str]] = None SCREAMING_SNAKE_CASE__ : Optional[List[str]] = None SCREAMING_SNAKE_CASE__ : Optional[Union[int, str, List[int], List[str]]] = None SCREAMING_SNAKE_CASE__ : Optional[Union[List[int], List[str]]] = None SCREAMING_SNAKE_CASE__ : Optional[str] = None SCREAMING_SNAKE_CASE__ : bool = True SCREAMING_SNAKE_CASE__ : Optional[Literal["c", "python", "pyarrow"]] = None SCREAMING_SNAKE_CASE__ : Dict[Union[int, str], Callable[[Any], Any]] = None SCREAMING_SNAKE_CASE__ : Optional[list] = None SCREAMING_SNAKE_CASE__ : Optional[list] = None SCREAMING_SNAKE_CASE__ : bool = False SCREAMING_SNAKE_CASE__ : Optional[Union[int, List[int]]] = None SCREAMING_SNAKE_CASE__ : Optional[int] = None SCREAMING_SNAKE_CASE__ : Optional[Union[str, List[str]]] = None SCREAMING_SNAKE_CASE__ : bool = True SCREAMING_SNAKE_CASE__ : bool = True SCREAMING_SNAKE_CASE__ : bool = False SCREAMING_SNAKE_CASE__ : bool = True SCREAMING_SNAKE_CASE__ : Optional[str] = None SCREAMING_SNAKE_CASE__ : str = "." SCREAMING_SNAKE_CASE__ : Optional[str] = None SCREAMING_SNAKE_CASE__ : str = '"' SCREAMING_SNAKE_CASE__ : int = 0 SCREAMING_SNAKE_CASE__ : Optional[str] = None SCREAMING_SNAKE_CASE__ : Optional[str] = None SCREAMING_SNAKE_CASE__ : Optional[str] = None SCREAMING_SNAKE_CASE__ : Optional[str] = None SCREAMING_SNAKE_CASE__ : bool = True SCREAMING_SNAKE_CASE__ : bool = True SCREAMING_SNAKE_CASE__ : int = 0 SCREAMING_SNAKE_CASE__ : bool = True SCREAMING_SNAKE_CASE__ : bool = False SCREAMING_SNAKE_CASE__ : Optional[str] = None SCREAMING_SNAKE_CASE__ : int = 1_00_00 SCREAMING_SNAKE_CASE__ : Optional[datasets.Features] = None SCREAMING_SNAKE_CASE__ : Optional[str] = "strict" SCREAMING_SNAKE_CASE__ : Literal["error", "warn", "skip"] = "error" SCREAMING_SNAKE_CASE__ : Optional[str] = None def A_ ( self ): '''simple docstring''' if self.delimiter is not None: UpperCAmelCase : Any = self.delimiter if self.column_names is not None: UpperCAmelCase : Optional[Any] = self.column_names @property def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = { "sep": self.sep, "header": self.header, "names": self.names, "index_col": self.index_col, "usecols": self.usecols, "prefix": self.prefix, "mangle_dupe_cols": self.mangle_dupe_cols, "engine": self.engine, "converters": self.converters, "true_values": self.true_values, "false_values": self.false_values, "skipinitialspace": self.skipinitialspace, "skiprows": self.skiprows, "nrows": self.nrows, "na_values": self.na_values, "keep_default_na": self.keep_default_na, "na_filter": self.na_filter, "verbose": self.verbose, "skip_blank_lines": self.skip_blank_lines, "thousands": self.thousands, "decimal": self.decimal, "lineterminator": self.lineterminator, "quotechar": self.quotechar, "quoting": self.quoting, "escapechar": self.escapechar, "comment": self.comment, "encoding": self.encoding, "dialect": self.dialect, "error_bad_lines": self.error_bad_lines, "warn_bad_lines": self.warn_bad_lines, "skipfooter": self.skipfooter, "doublequote": self.doublequote, "memory_map": self.memory_map, "float_precision": self.float_precision, "chunksize": self.chunksize, "encoding_errors": self.encoding_errors, "on_bad_lines": self.on_bad_lines, "date_format": self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , snake_case ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class UpperCamelCase__ ( datasets.ArrowBasedBuilder ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = CsvConfig def A_ ( self ): '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def A_ ( self , snake_case ): '''simple docstring''' if not self.config.data_files: raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}" ) UpperCAmelCase : List[Any] = dl_manager.download_and_extract(self.config.data_files ) if isinstance(snake_case , (str, list, tuple) ): UpperCAmelCase : List[str] = data_files if isinstance(snake_case , snake_case ): UpperCAmelCase : List[str] = [files] UpperCAmelCase : Tuple = [dl_manager.iter_files(snake_case ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )] UpperCAmelCase : Any = [] for split_name, files in data_files.items(): if isinstance(snake_case , snake_case ): UpperCAmelCase : str = [files] UpperCAmelCase : Dict = [dl_manager.iter_files(snake_case ) for file in files] splits.append(datasets.SplitGenerator(name=snake_case , gen_kwargs={"files": files} ) ) return splits def A_ ( self , snake_case ): '''simple docstring''' if self.config.features is not None: UpperCAmelCase : List[Any] = self.config.features.arrow_schema if all(not require_storage_cast(snake_case ) for feature in self.config.features.values() ): # cheaper cast UpperCAmelCase : Any = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=snake_case ) else: # more expensive cast; allows str <-> int/float or str to Audio for example UpperCAmelCase : Union[str, Any] = table_cast(snake_case , snake_case ) return pa_table def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : Optional[int] = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str UpperCAmelCase : int = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(snake_case ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(snake_case ) ): UpperCAmelCase : List[str] = pd.read_csv(snake_case , iterator=snake_case , dtype=snake_case , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(snake_case ): UpperCAmelCase : int = pa.Table.from_pandas(snake_case ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(snake_case ) except ValueError as e: logger.error(f"Failed to read file '{file}' with error {type(snake_case )}: {e}" ) raise
311
'''simple docstring''' a : List[str] = "0.21.0" from .accelerator import Accelerator from .big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from .data_loader import skip_first_batches from .launchers import debug_launcher, notebook_launcher from .state import PartialState from .utils import ( DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states, ) if is_rich_available(): from .utils import rich
311
1
'''simple docstring''' from __future__ import annotations a : Dict = [ [-1, 0], # left [0, -1], # down [1, 0], # right [0, 1], # up ] def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ): '''simple docstring''' UpperCAmelCase : Dict = [ [0 for col in range(len(grid[0] ) )] for row in range(len(__magic_name__ ) ) ] # the reference grid UpperCAmelCase : Optional[Any] = 1 UpperCAmelCase : str = [ [0 for col in range(len(grid[0] ) )] for row in range(len(__magic_name__ ) ) ] # the action grid UpperCAmelCase : int = init[0] UpperCAmelCase : Dict = init[1] UpperCAmelCase : List[str] = 0 UpperCAmelCase : List[str] = g + heuristic[x][y] # cost from starting cell to destination cell UpperCAmelCase : List[Any] = [[f, g, x, y]] UpperCAmelCase : Union[str, Any] = False # flag that is set when search is complete UpperCAmelCase : int = False # flag set if we can't find expand while not found and not resign: if len(__magic_name__ ) == 0: raise ValueError("Algorithm is unable to find solution" ) else: # to choose the least costliest action so as to move closer to the goal cell.sort() cell.reverse() UpperCAmelCase : int = cell.pop() UpperCAmelCase : Dict = next_cell[2] UpperCAmelCase : Dict = next_cell[3] UpperCAmelCase : List[Any] = next_cell[1] if x == goal[0] and y == goal[1]: UpperCAmelCase : List[str] = True else: for i in range(len(__magic_name__ ) ): # to try out different valid actions UpperCAmelCase : Optional[int] = x + DIRECTIONS[i][0] UpperCAmelCase : Union[str, Any] = y + DIRECTIONS[i][1] if xa >= 0 and xa < len(__magic_name__ ) and ya >= 0 and ya < len(grid[0] ): if closed[xa][ya] == 0 and grid[xa][ya] == 0: UpperCAmelCase : Any = g + cost UpperCAmelCase : Optional[Any] = ga + heuristic[xa][ya] cell.append([fa, ga, xa, ya] ) UpperCAmelCase : Dict = 1 UpperCAmelCase : int = i UpperCAmelCase : Dict = [] UpperCAmelCase : Tuple = goal[0] UpperCAmelCase : List[str] = goal[1] invpath.append([x, y] ) # we get the reverse path from here while x != init[0] or y != init[1]: UpperCAmelCase : Tuple = x - DIRECTIONS[action[x][y]][0] UpperCAmelCase : Dict = y - DIRECTIONS[action[x][y]][1] UpperCAmelCase : Optional[int] = xa UpperCAmelCase : List[Any] = ya invpath.append([x, y] ) UpperCAmelCase : Optional[Any] = [] for i in range(len(__magic_name__ ) ): path.append(invpath[len(__magic_name__ ) - 1 - i] ) return path, action if __name__ == "__main__": a : str = [ [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 1, 0], ] a : Tuple = [0, 0] # all coordinates are given in format [y,x] a : int = [len(grid) - 1, len(grid[0]) - 1] a : Tuple = 1 # the cost map which pushes the path closer to the goal a : Optional[Any] = [[0 for row in range(len(grid[0]))] for col in range(len(grid))] for i in range(len(grid)): for j in range(len(grid[0])): a : Dict = abs(i - goal[0]) + abs(j - goal[1]) if grid[i][j] == 1: # added extra penalty in the heuristic map a : List[Any] = 99 a , a : Optional[Any] = search(grid, init, goal, cost, heuristic) print("ACTION MAP") for i in range(len(action)): print(action[i]) for i in range(len(path)): print(path[i])
311
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() a : Dict = logging.get_logger(__name__) def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[str] = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: UpperCAmelCase : Tuple = 192 UpperCAmelCase : str = 768 UpperCAmelCase : List[Any] = 12 UpperCAmelCase : List[Any] = 3 UpperCAmelCase : List[Any] = [800, 1333] UpperCAmelCase : List[str] = False elif yolos_name == "yolos_s_dWr": UpperCAmelCase : Union[str, Any] = 330 UpperCAmelCase : Union[str, Any] = 14 UpperCAmelCase : Any = 6 UpperCAmelCase : int = 1320 elif "yolos_s" in yolos_name: UpperCAmelCase : Union[str, Any] = 384 UpperCAmelCase : Dict = 1536 UpperCAmelCase : str = 12 UpperCAmelCase : List[str] = 6 elif "yolos_b" in yolos_name: UpperCAmelCase : int = [800, 1344] UpperCAmelCase : Optional[int] = 91 UpperCAmelCase : int = "huggingface/label-files" UpperCAmelCase : Union[str, Any] = "coco-detection-id2label.json" UpperCAmelCase : Optional[Any] = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="dataset" ) , "r" ) ) UpperCAmelCase : str = {int(__magic_name__ ): v for k, v in idalabel.items()} UpperCAmelCase : str = idalabel UpperCAmelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} return config def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ = False ): '''simple docstring''' for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCAmelCase : Tuple = state_dict.pop(F"blocks.{i}.attn.qkv.weight" ) UpperCAmelCase : List[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase : str = in_proj_weight[: config.hidden_size, :] UpperCAmelCase : Optional[int] = in_proj_bias[: config.hidden_size] UpperCAmelCase : Optional[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCAmelCase : int = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCAmelCase : str = in_proj_weight[-config.hidden_size :, :] UpperCAmelCase : Tuple = in_proj_bias[-config.hidden_size :] def lowercase ( __magic_name__ ): '''simple docstring''' if "backbone" in name: UpperCAmelCase : int = name.replace("backbone" , "vit" ) if "cls_token" in name: UpperCAmelCase : Dict = name.replace("cls_token" , "embeddings.cls_token" ) if "det_token" in name: UpperCAmelCase : int = name.replace("det_token" , "embeddings.detection_tokens" ) if "mid_pos_embed" in name: UpperCAmelCase : Tuple = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" ) if "pos_embed" in name: UpperCAmelCase : int = name.replace("pos_embed" , "embeddings.position_embeddings" ) if "patch_embed.proj" in name: UpperCAmelCase : str = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "blocks" in name: UpperCAmelCase : Tuple = name.replace("blocks" , "encoder.layer" ) if "attn.proj" in name: UpperCAmelCase : Tuple = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: UpperCAmelCase : Any = name.replace("attn" , "attention.self" ) if "norm1" in name: UpperCAmelCase : int = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: UpperCAmelCase : List[str] = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: UpperCAmelCase : List[str] = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: UpperCAmelCase : Dict = name.replace("mlp.fc2" , "output.dense" ) if "class_embed" in name: UpperCAmelCase : Any = name.replace("class_embed" , "class_labels_classifier" ) if "bbox_embed" in name: UpperCAmelCase : Optional[int] = name.replace("bbox_embed" , "bbox_predictor" ) if "vit.norm" in name: UpperCAmelCase : Tuple = name.replace("vit.norm" , "vit.layernorm" ) return name def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' for key in orig_state_dict.copy().keys(): UpperCAmelCase : Optional[int] = orig_state_dict.pop(__magic_name__ ) if "qkv" in key: UpperCAmelCase : str = key.split("." ) UpperCAmelCase : List[Any] = int(key_split[2] ) UpperCAmelCase : int = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: UpperCAmelCase : Optional[int] = val[:dim, :] UpperCAmelCase : Union[str, Any] = val[ dim : dim * 2, : ] UpperCAmelCase : Any = val[-dim:, :] else: UpperCAmelCase : Tuple = val[:dim] UpperCAmelCase : List[str] = val[dim : dim * 2] UpperCAmelCase : Any = val[-dim:] else: UpperCAmelCase : Union[str, Any] = val return orig_state_dict def lowercase ( ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase : Tuple = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw ) return im @torch.no_grad() def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = False ): '''simple docstring''' UpperCAmelCase : Tuple = get_yolos_config(__magic_name__ ) # load original state_dict UpperCAmelCase : int = torch.load(__magic_name__ , map_location="cpu" )["model"] # load 🤗 model UpperCAmelCase : int = YolosForObjectDetection(__magic_name__ ) model.eval() UpperCAmelCase : Dict = convert_state_dict(__magic_name__ , __magic_name__ ) model.load_state_dict(__magic_name__ ) # Check outputs on an image, prepared by YolosImageProcessor UpperCAmelCase : Dict = 800 if yolos_name != "yolos_ti" else 512 UpperCAmelCase : int = YolosImageProcessor(format="coco_detection" , size=__magic_name__ ) UpperCAmelCase : List[Any] = image_processor(images=prepare_img() , return_tensors="pt" ) UpperCAmelCase : List[str] = model(**__magic_name__ ) UpperCAmelCase , UpperCAmelCase : Optional[int] = outputs.logits, outputs.pred_boxes UpperCAmelCase , UpperCAmelCase : Optional[Any] = None, None if yolos_name == "yolos_ti": UpperCAmelCase : str = torch.tensor( [[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] ) UpperCAmelCase : Tuple = torch.tensor( [[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] ) elif yolos_name == "yolos_s_200_pre": UpperCAmelCase : Union[str, Any] = torch.tensor( [[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] ) UpperCAmelCase : List[str] = torch.tensor( [[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] ) elif yolos_name == "yolos_s_300_pre": UpperCAmelCase : List[str] = torch.tensor( [[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] ) UpperCAmelCase : Dict = torch.tensor( [[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] ) elif yolos_name == "yolos_s_dWr": UpperCAmelCase : Dict = torch.tensor( [[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] ) UpperCAmelCase : List[Any] = torch.tensor( [[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] ) elif yolos_name == "yolos_base": UpperCAmelCase : str = torch.tensor( [[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] ) UpperCAmelCase : Union[str, Any] = torch.tensor( [[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] ) else: raise ValueError(F"Unknown yolos_name: {yolos_name}" ) assert torch.allclose(logits[0, :3, :3] , __magic_name__ , atol=1e-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , __magic_name__ , atol=1e-4 ) Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) print(F"Saving model {yolos_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(__magic_name__ ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(__magic_name__ ) if push_to_hub: UpperCAmelCase : int = { "yolos_ti": "yolos-tiny", "yolos_s_200_pre": "yolos-small", "yolos_s_300_pre": "yolos-small-300", "yolos_s_dWr": "yolos-small-dwr", "yolos_base": "yolos-base", } print("Pushing to the hub..." ) UpperCAmelCase : Tuple = model_mapping[yolos_name] image_processor.push_to_hub(__magic_name__ , organization="hustvl" ) model.push_to_hub(__magic_name__ , organization="hustvl" ) if __name__ == "__main__": a : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--yolos_name", default="yolos_s_200_pre", type=str, help=( "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre'," " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'." ), ) parser.add_argument( "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) a : str = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
311
1
'''simple docstring''' import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case , snake_case=1_3 , snake_case=7 , snake_case=True , snake_case=True , snake_case=9_9 , snake_case=3_2 , snake_case=5 , snake_case=4 , snake_case=3_7 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=5_0 , snake_case=0.02 , snake_case=True , snake_case=None , ): '''simple docstring''' UpperCAmelCase : Tuple = parent UpperCAmelCase : str = batch_size UpperCAmelCase : int = seq_length UpperCAmelCase : Union[str, Any] = is_training UpperCAmelCase : List[Any] = use_input_mask UpperCAmelCase : List[Any] = vocab_size UpperCAmelCase : Optional[int] = hidden_size UpperCAmelCase : List[Any] = num_hidden_layers UpperCAmelCase : List[str] = num_attention_heads UpperCAmelCase : List[Any] = intermediate_size UpperCAmelCase : List[str] = hidden_act UpperCAmelCase : Optional[Any] = hidden_dropout_prob UpperCAmelCase : int = attention_probs_dropout_prob UpperCAmelCase : Union[str, Any] = max_position_embeddings UpperCAmelCase : str = initializer_range UpperCAmelCase : int = use_labels UpperCAmelCase : Dict = scope def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : Tuple = None if self.use_input_mask: UpperCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : List[Any] = self.get_config() return config, input_ids, input_mask, token_labels def A_ ( self ): '''simple docstring''' return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=snake_case , initializer_range=self.initializer_range , ) def A_ ( self ): '''simple docstring''' ( ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ) : int = self.prepare_config_and_inputs() UpperCAmelCase : Tuple = True UpperCAmelCase : List[str] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , **snake_case , ): '''simple docstring''' UpperCAmelCase : str = BertGenerationEncoder(config=snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : Optional[Any] = model(snake_case , attention_mask=snake_case ) UpperCAmelCase : Tuple = model(snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , **snake_case , ): '''simple docstring''' UpperCAmelCase : Optional[int] = True UpperCAmelCase : int = BertGenerationEncoder(config=snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : List[Any] = model( snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , ) UpperCAmelCase : Dict = model( snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , **snake_case , ): '''simple docstring''' UpperCAmelCase : Tuple = True UpperCAmelCase : int = True UpperCAmelCase : Union[str, Any] = BertGenerationDecoder(config=snake_case ).to(snake_case ).eval() # first forward pass UpperCAmelCase : List[Any] = model( snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , use_cache=snake_case , ) UpperCAmelCase : Any = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase : List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase : str = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCAmelCase : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase : Tuple = torch.cat([input_mask, next_mask] , dim=-1 ) UpperCAmelCase : str = model( snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , output_hidden_states=snake_case , )["hidden_states"][0] UpperCAmelCase : Optional[int] = model( snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , past_key_values=snake_case , output_hidden_states=snake_case , )["hidden_states"][0] # select random slice UpperCAmelCase : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCAmelCase : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase : str = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-3 ) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , *snake_case , ): '''simple docstring''' UpperCAmelCase : Tuple = BertGenerationDecoder(snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : List[Any] = model(snake_case , attention_mask=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A_ ( self ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = self.prepare_config_and_inputs() UpperCAmelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class UpperCamelCase__ ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () SCREAMING_SNAKE_CASE__ : Union[str, Any] = (BertGenerationDecoder,) if is_torch_available() else () SCREAMING_SNAKE_CASE__ : List[str] = ( {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder} if is_torch_available() else {} ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = BertGenerationEncoderTester(self ) UpperCAmelCase : Any = ConfigTester(self , config_class=snake_case , hidden_size=3_7 ) def A_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() UpperCAmelCase : Dict = "bert" self.model_tester.create_and_check_model(snake_case , snake_case , snake_case , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*snake_case ) def A_ ( self ): '''simple docstring''' ( ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ) : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder() UpperCAmelCase : Union[str, Any] = None self.model_tester.create_and_check_model_as_decoder( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*snake_case ) @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) self.assertIsNotNone(snake_case ) @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) UpperCAmelCase : str = torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] ) with torch.no_grad(): UpperCAmelCase : Optional[int] = model(snake_case )[0] UpperCAmelCase : Any = torch.Size([1, 8, 1_0_2_4] ) self.assertEqual(output.shape , snake_case ) UpperCAmelCase : Optional[int] = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) ) @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) UpperCAmelCase : str = torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] ) with torch.no_grad(): UpperCAmelCase : Union[str, Any] = model(snake_case )[0] UpperCAmelCase : Optional[Any] = torch.Size([1, 8, 5_0_3_5_8] ) self.assertEqual(output.shape , snake_case ) UpperCAmelCase : List[str] = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) )
311
'''simple docstring''' import argparse import logging import pickle import random import time import numpy as np from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO ) a : Tuple = logging.getLogger(__name__) def lowercase ( ): '''simple docstring''' UpperCAmelCase : Any = argparse.ArgumentParser( description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." ) parser.add_argument("--file_path" , type=__magic_name__ , default="data/dump.txt" , help="The path to the data." ) parser.add_argument("--tokenizer_type" , type=__magic_name__ , default="bert" , choices=["bert", "roberta", "gpt2"] ) parser.add_argument("--tokenizer_name" , type=__magic_name__ , default="bert-base-uncased" , help="The tokenizer to use." ) parser.add_argument("--dump_file" , type=__magic_name__ , default="data/dump" , help="The dump file prefix." ) UpperCAmelCase : List[Any] = parser.parse_args() logger.info(F"Loading Tokenizer ({args.tokenizer_name})" ) if args.tokenizer_type == "bert": UpperCAmelCase : Any = BertTokenizer.from_pretrained(args.tokenizer_name ) UpperCAmelCase : Optional[int] = tokenizer.special_tokens_map["cls_token"] # `[CLS]` UpperCAmelCase : Any = tokenizer.special_tokens_map["sep_token"] # `[SEP]` elif args.tokenizer_type == "roberta": UpperCAmelCase : List[Any] = RobertaTokenizer.from_pretrained(args.tokenizer_name ) UpperCAmelCase : Tuple = tokenizer.special_tokens_map["cls_token"] # `<s>` UpperCAmelCase : Optional[int] = tokenizer.special_tokens_map["sep_token"] # `</s>` elif args.tokenizer_type == "gpt2": UpperCAmelCase : List[str] = GPTaTokenizer.from_pretrained(args.tokenizer_name ) UpperCAmelCase : Optional[Any] = tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>` UpperCAmelCase : List[Any] = tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>` logger.info(F"Loading text from {args.file_path}" ) with open(args.file_path , "r" , encoding="utf8" ) as fp: UpperCAmelCase : str = fp.readlines() logger.info("Start encoding" ) logger.info(F"{len(__magic_name__ )} examples to process." ) UpperCAmelCase : int = [] UpperCAmelCase : int = 0 UpperCAmelCase : Union[str, Any] = 1_0000 UpperCAmelCase : Union[str, Any] = time.time() for text in data: UpperCAmelCase : Dict = F"{bos} {text.strip()} {sep}" UpperCAmelCase : Tuple = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) rslt.append(__magic_name__ ) iter += 1 if iter % interval == 0: UpperCAmelCase : Dict = time.time() logger.info(F"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" ) UpperCAmelCase : Any = time.time() logger.info("Finished binarization" ) logger.info(F"{len(__magic_name__ )} examples processed." ) UpperCAmelCase : str = F"{args.dump_file}.{args.tokenizer_name}.pickle" UpperCAmelCase : List[str] = tokenizer.vocab_size if vocab_size < (1 << 16): UpperCAmelCase : int = [np.uintaa(__magic_name__ ) for d in rslt] else: UpperCAmelCase : int = [np.intaa(__magic_name__ ) for d in rslt] random.shuffle(rslt_ ) logger.info(F"Dump to {dp_file}" ) with open(__magic_name__ , "wb" ) as handle: pickle.dump(rslt_ , __magic_name__ , protocol=pickle.HIGHEST_PROTOCOL ) if __name__ == "__main__": main()
311
1
'''simple docstring''' from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case , snake_case=1_3 , snake_case=3_0 , snake_case=2 , snake_case=3 , snake_case=True , snake_case=True , snake_case=3_2 , snake_case=2 , snake_case=4 , snake_case=3_7 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=1_0 , snake_case=0.02 , snake_case=3 , snake_case=None , ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = parent UpperCAmelCase : Optional[int] = batch_size UpperCAmelCase : Optional[int] = image_size UpperCAmelCase : int = patch_size UpperCAmelCase : str = num_channels UpperCAmelCase : Union[str, Any] = is_training UpperCAmelCase : Dict = use_labels UpperCAmelCase : List[Any] = hidden_size UpperCAmelCase : Union[str, Any] = num_hidden_layers UpperCAmelCase : Tuple = num_attention_heads UpperCAmelCase : Optional[Any] = intermediate_size UpperCAmelCase : List[Any] = hidden_act UpperCAmelCase : Tuple = hidden_dropout_prob UpperCAmelCase : str = attention_probs_dropout_prob UpperCAmelCase : List[Any] = type_sequence_label_size UpperCAmelCase : Any = initializer_range UpperCAmelCase : int = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase : Optional[int] = (image_size // patch_size) ** 2 UpperCAmelCase : Optional[int] = num_patches + 1 def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase : Any = None if self.use_labels: UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase : int = self.get_config() return config, pixel_values, labels def A_ ( self ): '''simple docstring''' return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case , initializer_range=self.initializer_range , ) def A_ ( self , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = TFViTModel(config=snake_case ) UpperCAmelCase : List[str] = model(snake_case , training=snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # Test with an image with different size than the one specified in config. UpperCAmelCase : Dict = self.image_size // 2 UpperCAmelCase : Dict = pixel_values[:, :, :image_size, :image_size] UpperCAmelCase : Dict = model(snake_case , interpolate_pos_encoding=snake_case , training=snake_case ) UpperCAmelCase : List[str] = (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) ) def A_ ( self , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Dict = self.type_sequence_label_size UpperCAmelCase : Tuple = TFViTForImageClassification(snake_case ) UpperCAmelCase : int = model(snake_case , labels=snake_case , training=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # Test with an image with different size than the one specified in config. UpperCAmelCase : Any = self.image_size // 2 UpperCAmelCase : str = pixel_values[:, :, :image_size, :image_size] UpperCAmelCase : Optional[Any] = model(snake_case , interpolate_pos_encoding=snake_case , training=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase : Dict = 1 UpperCAmelCase : str = TFViTForImageClassification(snake_case ) UpperCAmelCase : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase : List[Any] = model(snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[Any] = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = config_and_inputs UpperCAmelCase : int = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class UpperCamelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : int = (TFViTModel, TFViTForImageClassification) if is_tf_available() else () SCREAMING_SNAKE_CASE__ : Any = ( {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification} if is_tf_available() else {} ) SCREAMING_SNAKE_CASE__ : Optional[int] = False SCREAMING_SNAKE_CASE__ : str = False SCREAMING_SNAKE_CASE__ : Dict = False def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = TFViTModelTester(self ) UpperCAmelCase : Union[str, Any] = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=3_7 ) def A_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="ViT does not use inputs_embeds" ) def A_ ( self ): '''simple docstring''' pass @unittest.skip(reason="ViT does not use inputs_embeds" ) def A_ ( self ): '''simple docstring''' pass def A_ ( self ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : Optional[int] = model_class(snake_case ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) UpperCAmelCase : List[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case , tf.keras.layers.Layer ) ) def A_ ( self ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : Optional[int] = model_class(snake_case ) UpperCAmelCase : str = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase : Tuple = [*signature.parameters.keys()] UpperCAmelCase : str = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case ) @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[Any] = TFViTModel.from_pretrained("google/vit-base-patch16-224" ) self.assertIsNotNone(snake_case ) def lowercase ( ): '''simple docstring''' UpperCAmelCase : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @cached_property def A_ ( self ): '''simple docstring''' return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ) UpperCAmelCase : Optional[int] = self.default_image_processor UpperCAmelCase : List[Any] = prepare_img() UpperCAmelCase : Optional[int] = image_processor(images=snake_case , return_tensors="tf" ) # forward pass UpperCAmelCase : List[str] = model(**snake_case ) # verify the logits UpperCAmelCase : Optional[int] = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , snake_case ) UpperCAmelCase : Optional[Any] = tf.constant([-0.2744, 0.8215, -0.0836] ) tf.debugging.assert_near(outputs.logits[0, :3] , snake_case , atol=1e-4 )
311
'''simple docstring''' import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available from transformers.models.gpta.tokenization_gpta import GPTaTokenizer from transformers.testing_utils import require_keras_nlp, require_tf, slow if is_tf_available(): import tensorflow as tf if is_keras_nlp_available(): from transformers.models.gpta import TFGPTaTokenizer a : Tuple = ["gpt2"] a : Dict = "gpt2" if is_tf_available(): class UpperCamelCase__ ( tf.Module ): """simple docstring""" def __init__( self , snake_case ): '''simple docstring''' super().__init__() UpperCAmelCase : Tuple = tokenizer UpperCAmelCase : List[str] = AutoConfig.from_pretrained(snake_case ) UpperCAmelCase : int = TFGPTaLMHeadModel.from_config(snake_case ) @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="text" ),) ) def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = self.tokenizer(snake_case ) UpperCAmelCase : Optional[int] = tokenized["input_ids"].to_tensor() UpperCAmelCase : Optional[int] = tf.cast(input_ids_dense > 0 , tf.intaa ) # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN]) UpperCAmelCase : List[Any] = self.model(input_ids=snake_case , attention_mask=snake_case )["logits"] return outputs @require_tf @require_keras_nlp class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self ): '''simple docstring''' super().setUp() UpperCAmelCase : Any = [GPTaTokenizer.from_pretrained(snake_case ) for checkpoint in (TOKENIZER_CHECKPOINTS)] UpperCAmelCase : Optional[Any] = [TFGPTaTokenizer.from_pretrained(snake_case ) for checkpoint in TOKENIZER_CHECKPOINTS] assert len(self.tokenizers ) == len(self.tf_tokenizers ) UpperCAmelCase : Tuple = [ "This is a straightforward English test sentence.", "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.", "Now we're going to add some Chinese: 一 二 三 一二三", "And some much more rare Chinese: 齉 堃 齉堃", "Je vais aussi écrire en français pour tester les accents", "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ", ] UpperCAmelCase : Optional[Any] = list(zip(self.test_sentences , self.test_sentences[::-1] ) ) def A_ ( self ): '''simple docstring''' for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ): for test_inputs in self.test_sentences: UpperCAmelCase : List[Any] = tokenizer([test_inputs] , return_tensors="tf" ) UpperCAmelCase : Any = tf_tokenizer([test_inputs] ) for key in python_outputs.keys(): # convert them to numpy to avoid messing with ragged tensors UpperCAmelCase : Dict = python_outputs[key].numpy() UpperCAmelCase : List[str] = tf_outputs[key].numpy() self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) ) self.assertTrue(tf.reduce_all(tf.cast(snake_case , tf.intaa ) == tf_outputs_values ) ) @slow def A_ ( self ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: UpperCAmelCase : Optional[Any] = tf.function(snake_case ) for test_inputs in self.test_sentences: UpperCAmelCase : List[str] = tf.constant(snake_case ) UpperCAmelCase : Dict = compiled_tokenizer(snake_case ) UpperCAmelCase : Union[str, Any] = tf_tokenizer(snake_case ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def A_ ( self ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: UpperCAmelCase : int = ModelToSave(tokenizer=snake_case ) UpperCAmelCase : Tuple = tf.convert_to_tensor([self.test_sentences[0]] ) UpperCAmelCase : str = model.serving(snake_case ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: UpperCAmelCase : Optional[int] = Path(snake_case ) / "saved.model" tf.saved_model.save(snake_case , snake_case , signatures={"serving_default": model.serving} ) UpperCAmelCase : int = tf.saved_model.load(snake_case ) UpperCAmelCase : str = loaded_model.signatures["serving_default"](snake_case )["output_0"] # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertTrue(tf.reduce_all(out == loaded_output ) ) @slow def A_ ( self ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: UpperCAmelCase : Any = tf.convert_to_tensor([self.test_sentences[0]] ) UpperCAmelCase : Tuple = tf_tokenizer(snake_case ) # Build model with some sample inputs UpperCAmelCase : Union[str, Any] = tf_tokenizer.get_config() UpperCAmelCase : str = TFGPTaTokenizer.from_config(snake_case ) UpperCAmelCase : Tuple = model_from_config(snake_case ) for key in from_config_output.keys(): self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) ) @slow def A_ ( self ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: # for the test to run UpperCAmelCase : List[str] = 1_2_3_1_2_3 for max_length in [3, 5, 1_0_2_4]: UpperCAmelCase : Any = tf.convert_to_tensor([self.test_sentences[0]] ) UpperCAmelCase : Tuple = tf_tokenizer(snake_case , max_length=snake_case ) UpperCAmelCase : Union[str, Any] = out["input_ids"].numpy().shape[1] assert out_length == max_length
311
1
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a : Optional[Any] = logging.get_logger(__name__) a : str = { "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json", "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json", "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json", # See all BigBird models at https://huggingface.co/models?filter=big_bird } class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : int = "big_bird" def __init__( self , snake_case=5_0_3_5_8 , snake_case=7_6_8 , snake_case=1_2 , snake_case=1_2 , snake_case=3_0_7_2 , snake_case="gelu_new" , snake_case=0.1 , snake_case=0.1 , snake_case=4_0_9_6 , snake_case=2 , snake_case=0.02 , snake_case=1e-12 , snake_case=True , snake_case=0 , snake_case=1 , snake_case=2 , snake_case=6_6 , snake_case="block_sparse" , snake_case=True , snake_case=False , snake_case=6_4 , snake_case=3 , snake_case=None , **snake_case , ): '''simple docstring''' super().__init__( pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case , sep_token_id=snake_case , **snake_case , ) UpperCAmelCase : Union[str, Any] = vocab_size UpperCAmelCase : int = max_position_embeddings UpperCAmelCase : Union[str, Any] = hidden_size UpperCAmelCase : int = num_hidden_layers UpperCAmelCase : Any = num_attention_heads UpperCAmelCase : Any = intermediate_size UpperCAmelCase : List[str] = hidden_act UpperCAmelCase : Tuple = hidden_dropout_prob UpperCAmelCase : int = attention_probs_dropout_prob UpperCAmelCase : List[Any] = initializer_range UpperCAmelCase : Union[str, Any] = type_vocab_size UpperCAmelCase : int = layer_norm_eps UpperCAmelCase : List[Any] = use_cache UpperCAmelCase : List[str] = rescale_embeddings UpperCAmelCase : Dict = attention_type UpperCAmelCase : Dict = use_bias UpperCAmelCase : Dict = block_size UpperCAmelCase : Any = num_random_blocks UpperCAmelCase : Tuple = classifier_dropout class UpperCamelCase__ ( lowercase__ ): """simple docstring""" @property def A_ ( self ): '''simple docstring''' if self.task == "multiple-choice": UpperCAmelCase : Dict = {0: "batch", 1: "choice", 2: "sequence"} else: UpperCAmelCase : List[Any] = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
311
'''simple docstring''' import argparse from collections import defaultdict import yaml a : str = "docs/source/en/_toctree.yml" def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = defaultdict(__magic_name__ ) for doc in model_doc: counts[doc["local"]] += 1 UpperCAmelCase : List[Any] = [key for key, value in counts.items() if value > 1] UpperCAmelCase : Dict = [] for duplicate_key in duplicates: UpperCAmelCase : Union[str, Any] = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key} ) if len(__magic_name__ ) > 1: raise ValueError( F"{duplicate_key} is present several times in the documentation table of content at " "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the " "others." ) # Only add this once new_doc.append({"local": duplicate_key, "title": titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1] ) # Sort return sorted(__magic_name__ , key=lambda __magic_name__ : s["title"].lower() ) def lowercase ( __magic_name__=False ): '''simple docstring''' with open(__magic_name__ , encoding="utf-8" ) as f: UpperCAmelCase : Any = yaml.safe_load(f.read() ) # Get to the API doc UpperCAmelCase : Optional[int] = 0 while content[api_idx]["title"] != "API": api_idx += 1 UpperCAmelCase : Union[str, Any] = content[api_idx]["sections"] # Then to the model doc UpperCAmelCase : Any = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 UpperCAmelCase : str = api_doc[model_idx]["sections"] UpperCAmelCase : Any = [(idx, section) for idx, section in enumerate(__magic_name__ ) if "sections" in section] UpperCAmelCase : Optional[int] = False for idx, modality_doc in modalities_docs: UpperCAmelCase : int = modality_doc["sections"] UpperCAmelCase : int = clean_model_doc_toc(__magic_name__ ) if old_modality_doc != new_modality_doc: UpperCAmelCase : int = True if overwrite: UpperCAmelCase : Dict = new_modality_doc if diff: if overwrite: UpperCAmelCase : Any = model_doc UpperCAmelCase : Any = api_doc with open(__magic_name__ , "w" , encoding="utf-8" ) as f: f.write(yaml.dump(__magic_name__ , allow_unicode=__magic_name__ ) ) else: raise ValueError( "The model doc part of the table of content is not properly sorted, run `make style` to fix this." ) if __name__ == "__main__": a : Optional[Any] = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") a : Optional[Any] = parser.parse_args() check_model_doc(args.fix_and_overwrite)
311
1
'''simple docstring''' from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case , snake_case=1_3 , snake_case=3_0 , snake_case=2 , snake_case=3 , snake_case=True , snake_case=True , snake_case=3_2 , snake_case=2 , snake_case=4 , snake_case=3_7 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=1_0 , snake_case=0.02 , snake_case=3 , snake_case=None , snake_case=2 , ): '''simple docstring''' UpperCAmelCase : Any = parent UpperCAmelCase : str = batch_size UpperCAmelCase : Union[str, Any] = image_size UpperCAmelCase : List[str] = patch_size UpperCAmelCase : Optional[Any] = num_channels UpperCAmelCase : Any = is_training UpperCAmelCase : str = use_labels UpperCAmelCase : str = hidden_size UpperCAmelCase : List[str] = num_hidden_layers UpperCAmelCase : Any = num_attention_heads UpperCAmelCase : Dict = intermediate_size UpperCAmelCase : List[Any] = hidden_act UpperCAmelCase : List[Any] = hidden_dropout_prob UpperCAmelCase : str = attention_probs_dropout_prob UpperCAmelCase : List[str] = type_sequence_label_size UpperCAmelCase : Any = initializer_range UpperCAmelCase : str = scope UpperCAmelCase : Optional[int] = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) UpperCAmelCase : Optional[int] = (image_size // patch_size) ** 2 UpperCAmelCase : Any = num_patches + 2 def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase : List[Any] = None if self.use_labels: UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase : Optional[int] = self.get_config() return config, pixel_values, labels def A_ ( self ): '''simple docstring''' return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def A_ ( self , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Optional[Any] = TFDeiTModel(config=snake_case ) UpperCAmelCase : Any = model(snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A_ ( self , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : List[Any] = TFDeiTForMaskedImageModeling(config=snake_case ) UpperCAmelCase : int = model(snake_case ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images UpperCAmelCase : List[str] = 1 UpperCAmelCase : Optional[int] = TFDeiTForMaskedImageModeling(snake_case ) UpperCAmelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase : List[Any] = model(snake_case ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def A_ ( self , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Tuple = self.type_sequence_label_size UpperCAmelCase : Optional[Any] = TFDeiTForImageClassification(snake_case ) UpperCAmelCase : Optional[int] = model(snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase : int = 1 UpperCAmelCase : Dict = TFDeiTForImageClassification(snake_case ) UpperCAmelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase : Dict = model(snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = config_and_inputs UpperCAmelCase : str = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class UpperCamelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = ( { "feature-extraction": TFDeiTModel, "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) SCREAMING_SNAKE_CASE__ : Tuple = False SCREAMING_SNAKE_CASE__ : Optional[Any] = False SCREAMING_SNAKE_CASE__ : List[str] = False SCREAMING_SNAKE_CASE__ : int = False def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[str] = TFDeiTModelTester(self ) UpperCAmelCase : Optional[int] = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=3_7 ) def A_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use inputs_embeds" ) def A_ ( self ): '''simple docstring''' pass def A_ ( self ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : int = model_class(snake_case ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) UpperCAmelCase : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case , tf.keras.layers.Dense ) ) def A_ ( self ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : Optional[Any] = model_class(snake_case ) UpperCAmelCase : Optional[int] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase : Union[str, Any] = [*signature.parameters.keys()] UpperCAmelCase : Dict = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case ) def A_ ( self , snake_case , snake_case , snake_case=False ): '''simple docstring''' UpperCAmelCase : Optional[Any] = super()._prepare_for_class(snake_case , snake_case , return_labels=snake_case ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def A_ ( self ): '''simple docstring''' for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : Optional[Any] = TFDeiTModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def lowercase ( ): '''simple docstring''' UpperCAmelCase : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @cached_property def A_ ( self ): '''simple docstring''' return ( DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" ) if is_vision_available() else None ) @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ) UpperCAmelCase : Any = self.default_image_processor UpperCAmelCase : Any = prepare_img() UpperCAmelCase : int = image_processor(images=snake_case , return_tensors="tf" ) # forward pass UpperCAmelCase : Dict = model(**snake_case ) # verify the logits UpperCAmelCase : List[str] = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , snake_case ) UpperCAmelCase : Dict = tf.constant([-1.0266, 0.1912, -1.2861] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , snake_case , atol=1e-4 ) )
311
'''simple docstring''' from datetime import datetime import matplotlib.pyplot as plt import torch def lowercase ( __magic_name__ ): '''simple docstring''' for param in module.parameters(): UpperCAmelCase : Any = False def lowercase ( ): '''simple docstring''' UpperCAmelCase : int = "cuda" if torch.cuda.is_available() else "cpu" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): UpperCAmelCase : int = "mps" if device == "mps": print( "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch" " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues" " with generations." ) return device def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : str = plt.imshow(__magic_name__ ) fig.axes.get_xaxis().set_visible(__magic_name__ ) fig.axes.get_yaxis().set_visible(__magic_name__ ) plt.show() def lowercase ( ): '''simple docstring''' UpperCAmelCase : str = datetime.now() UpperCAmelCase : Tuple = current_time.strftime("%H:%M:%S" ) return timestamp
311
1
'''simple docstring''' from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case , snake_case=3 , snake_case=3_2 , snake_case=3 , snake_case=1_0 , snake_case=[1_0, 2_0, 3_0, 4_0] , snake_case=[1, 1, 2, 1] , snake_case=True , snake_case=True , snake_case="relu" , snake_case=3 , snake_case=None , ): '''simple docstring''' UpperCAmelCase : Dict = parent UpperCAmelCase : Optional[Any] = batch_size UpperCAmelCase : str = image_size UpperCAmelCase : Optional[int] = num_channels UpperCAmelCase : List[str] = embeddings_size UpperCAmelCase : Dict = hidden_sizes UpperCAmelCase : int = depths UpperCAmelCase : Optional[Any] = is_training UpperCAmelCase : List[str] = use_labels UpperCAmelCase : int = hidden_act UpperCAmelCase : List[str] = num_labels UpperCAmelCase : Any = scope UpperCAmelCase : Dict = len(snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase : Optional[int] = None if self.use_labels: UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.num_labels ) UpperCAmelCase : Any = self.get_config() return config, pixel_values, labels def A_ ( self ): '''simple docstring''' return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def A_ ( self , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Dict = TFResNetModel(config=snake_case ) UpperCAmelCase : Optional[Any] = model(snake_case ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def A_ ( self , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : List[Any] = self.num_labels UpperCAmelCase : Union[str, Any] = TFResNetForImageClassification(snake_case ) UpperCAmelCase : int = model(snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = config_and_inputs UpperCAmelCase : List[str] = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class UpperCamelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () SCREAMING_SNAKE_CASE__ : Union[str, Any] = ( {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification} if is_tf_available() else {} ) SCREAMING_SNAKE_CASE__ : List[str] = False SCREAMING_SNAKE_CASE__ : Union[str, Any] = False SCREAMING_SNAKE_CASE__ : int = False SCREAMING_SNAKE_CASE__ : Any = False SCREAMING_SNAKE_CASE__ : Any = False def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[Any] = TFResNetModelTester(self ) UpperCAmelCase : Optional[int] = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case ) def A_ ( self ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A_ ( self ): '''simple docstring''' return @unittest.skip(reason="ResNet does not use inputs_embeds" ) def A_ ( self ): '''simple docstring''' pass @unittest.skip(reason="ResNet does not support input and output embeddings" ) def A_ ( self ): '''simple docstring''' pass def A_ ( self ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : Union[str, Any] = model_class(snake_case ) UpperCAmelCase : List[str] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase : List[str] = [*signature.parameters.keys()] UpperCAmelCase : Dict = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def A_ ( self ): '''simple docstring''' def check_hidden_states_output(snake_case , snake_case , snake_case ): UpperCAmelCase : Optional[int] = model_class(snake_case ) UpperCAmelCase : Optional[int] = model(**self._prepare_for_class(snake_case , snake_case ) ) UpperCAmelCase : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCAmelCase : Dict = self.model_tester.num_stages self.assertEqual(len(snake_case ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Dict = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: UpperCAmelCase : List[Any] = layer_type UpperCAmelCase : str = True check_hidden_states_output(snake_case , snake_case , snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase : Optional[int] = True check_hidden_states_output(snake_case , snake_case , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case ) @slow def A_ ( self ): '''simple docstring''' for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : Optional[int] = TFResNetModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def lowercase ( ): '''simple docstring''' UpperCAmelCase : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @cached_property def A_ ( self ): '''simple docstring''' return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) UpperCAmelCase : Union[str, Any] = self.default_image_processor UpperCAmelCase : Dict = prepare_img() UpperCAmelCase : Union[str, Any] = image_processor(images=snake_case , return_tensors="tf" ) # forward pass UpperCAmelCase : List[Any] = model(**snake_case ) # verify the logits UpperCAmelCase : Optional[Any] = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , snake_case ) UpperCAmelCase : List[str] = tf.constant([-11.1069, -9.7877, -8.3777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , snake_case , atol=1e-4 ) )
311
'''simple docstring''' import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import ( SeqaSeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) a : str = getLogger(__name__) def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = 8 , __magic_name__ = 1024 , __magic_name__="val" , __magic_name__=None , __magic_name__=False , __magic_name__="summarization" , __magic_name__=None , __magic_name__=1 , __magic_name__ = None , __magic_name__="" , **__magic_name__ , ): '''simple docstring''' UpperCAmelCase : List[Any] = str(__magic_name__ ) assert local_rank is not None torch.distributed.init_process_group(backend="nccl" , rank=__magic_name__ ) UpperCAmelCase : List[str] = Path(__magic_name__ ) UpperCAmelCase : Dict = save_dir.joinpath(F"rank_{local_rank}_output.json" ) torch.cuda.set_device(__magic_name__ ) UpperCAmelCase : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(__magic_name__ ).cuda() if fpaa: UpperCAmelCase : int = model.half() # determine if we need to increase num_beams use_task_specific_params(__magic_name__ , __magic_name__ ) # update config with task specific params UpperCAmelCase : Dict = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk? if num_return_sequences > num_beams: UpperCAmelCase : Optional[Any] = num_return_sequences UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(__magic_name__ ) logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type. if max_source_length is None: UpperCAmelCase : Any = tokenizer.model_max_length if prefix is None: UpperCAmelCase : Tuple = prefix or getattr(model.config , "prefix" , "" ) or "" UpperCAmelCase : Dict = SeqaSeqDataset( __magic_name__ , __magic_name__ , __magic_name__ , max_target_length=1024 , type_path=__magic_name__ , n_obs=__magic_name__ , prefix=__magic_name__ , **__magic_name__ , ) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. UpperCAmelCase : int = ds.make_sortish_sampler(__magic_name__ , distributed=__magic_name__ , add_extra_examples=__magic_name__ , shuffle=__magic_name__ ) UpperCAmelCase : List[Any] = DataLoader(__magic_name__ , sampler=__magic_name__ , batch_size=__magic_name__ , collate_fn=ds.collate_fn ) UpperCAmelCase : Any = [] for batch in tqdm(__magic_name__ ): UpperCAmelCase : List[Any] = model.generate( input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=__magic_name__ , num_beams=__magic_name__ , **__magic_name__ , ) UpperCAmelCase : Optional[int] = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ ) UpperCAmelCase : int = batch["ids"] if num_return_sequences > 1: UpperCAmelCase : List[Any] = chunks(__magic_name__ , __magic_name__ ) # batch size chunks, each of size num_return_seq for i, pred in enumerate(__magic_name__ ): results.append({"pred": pred, "id": ids[i].item()} ) save_json(__magic_name__ , __magic_name__ ) return results, sampler.num_replicas def lowercase ( ): '''simple docstring''' UpperCAmelCase : str = argparse.ArgumentParser( epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" ) parser.add_argument("--data_dir" , type=__magic_name__ , help="like cnn_dm/test.source" ) parser.add_argument( "--model_name" , type=__magic_name__ , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , ) parser.add_argument("--save_dir" , type=__magic_name__ , help="where to save" , default="tmp_gen" ) parser.add_argument("--max_source_length" , type=__magic_name__ , default=__magic_name__ ) parser.add_argument( "--type_path" , type=__magic_name__ , default="test" , help="which subset to evaluate typically train/val/test" ) parser.add_argument("--task" , type=__magic_name__ , default="summarization" , help="used for task_specific_params + metrics" ) parser.add_argument("--bs" , type=__magic_name__ , default=8 , required=__magic_name__ , help="batch size" ) parser.add_argument( "--local_rank" , type=__magic_name__ , default=-1 , required=__magic_name__ , help="should be passed by distributed.launch" ) parser.add_argument( "--n_obs" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ , help="How many observations. Defaults to all." ) parser.add_argument( "--num_return_sequences" , type=__magic_name__ , default=1 , required=__magic_name__ , help="How many sequences to return" ) parser.add_argument( "--sync_timeout" , type=__magic_name__ , default=600 , required=__magic_name__ , help="How long should master process wait for other processes to finish." , ) parser.add_argument("--src_lang" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ ) parser.add_argument("--tgt_lang" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ ) parser.add_argument( "--prefix" , type=__magic_name__ , required=__magic_name__ , default=__magic_name__ , help="will be added to the begininng of src examples" ) parser.add_argument("--fp16" , action="store_true" ) parser.add_argument("--debug" , action="store_true" ) UpperCAmelCase : Union[str, Any] = time.time() UpperCAmelCase , UpperCAmelCase : Dict = parser.parse_known_args() UpperCAmelCase : Tuple = parse_numeric_n_bool_cl_kwargs(__magic_name__ ) if generate_kwargs and args.local_rank <= 0: print(F"parsed the following generate kwargs: {generate_kwargs}" ) UpperCAmelCase : Union[str, Any] = Path(args.save_dir + "_tmp" ) Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) # this handles locking. UpperCAmelCase : List[Any] = list(json_save_dir.glob("rank_*.json" ) ) if intermediate_files: raise ValueError(F"Found files at {json_save_dir} please move or remove them." ) # In theory, a node could finish and save before another node hits this. If this happens, we can address later. UpperCAmelCase : Optional[Any] = {} if args.src_lang is not None: UpperCAmelCase : List[str] = args.src_lang if args.tgt_lang is not None: UpperCAmelCase : Dict = args.tgt_lang Path(args.save_dir ).mkdir(exist_ok=__magic_name__ ) UpperCAmelCase , UpperCAmelCase : str = eval_data_dir( args.data_dir , __magic_name__ , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=__magic_name__ , **__magic_name__ , ) if args.local_rank <= 0: UpperCAmelCase : List[str] = Path(args.save_dir ) save_dir.mkdir(exist_ok=__magic_name__ ) UpperCAmelCase : str = gather_results_from_each_node(__magic_name__ , __magic_name__ , args.sync_timeout ) UpperCAmelCase : Dict = combine_partial_results(__magic_name__ ) if args.num_return_sequences > 1: UpperCAmelCase : int = save_dir.joinpath("pseudolabel_results.json" ) print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" ) save_json(__magic_name__ , __magic_name__ ) return UpperCAmelCase : Dict = Path(args.data_dir ).joinpath(args.type_path + ".target" ) with open(__magic_name__ ) as f: UpperCAmelCase : Dict = [x.rstrip() for x in f.readlines()][: len(__magic_name__ )] # Calculate metrics, save metrics, and save _generations.txt UpperCAmelCase : Optional[int] = "translation" in args.task UpperCAmelCase : str = calculate_bleu if calc_bleu else calculate_rouge UpperCAmelCase : Tuple = "bleu" if calc_bleu else "rouge" UpperCAmelCase : Dict = score_fn(__magic_name__ , __magic_name__ ) UpperCAmelCase : Any = len(__magic_name__ ) UpperCAmelCase : Union[str, Any] = time.time() - start_time UpperCAmelCase : Dict = round(runtime / metrics["n_obs"] , 4 ) UpperCAmelCase : Optional[Any] = num_replicas # TODO(@stas00): add whatever metadata to metrics UpperCAmelCase : Dict = save_dir.joinpath(F"{args.type_path}_{metric_name}.json" ) save_json(__magic_name__ , __magic_name__ , indent=__magic_name__ ) print(__magic_name__ ) write_txt_file(__magic_name__ , save_dir.joinpath(F"{args.type_path}_generations.txt" ) ) if args.debug: write_txt_file(__magic_name__ , save_dir.joinpath(F"{args.type_path}.target" ) ) else: shutil.rmtree(__magic_name__ ) def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Tuple = [] for partial_result in partial_results: records.extend(__magic_name__ ) UpperCAmelCase : Optional[Any] = sorted(__magic_name__ , key=lambda __magic_name__ : x["id"] ) UpperCAmelCase : List[Any] = [x["pred"] for x in records] return preds def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = time.time() logger.info("waiting for all nodes to finish" ) UpperCAmelCase : Union[str, Any] = None while (time.time() - start_wait) < timeout: UpperCAmelCase : Dict = list(save_dir.glob("rank_*.json" ) ) if len(__magic_name__ ) < num_replicas: continue try: # make sure all json files are fully saved UpperCAmelCase : List[str] = lmap(__magic_name__ , __magic_name__ ) return json_data except JSONDecodeError: continue else: raise TimeoutError("Rank 0 gave up on waiting for other processes" ) # Unreachable if __name__ == "__main__": # Usage for MT: run_generate()
311
1
'''simple docstring''' from math import isclose, sqrt def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = point_y / 4 / point_x UpperCAmelCase : Union[str, Any] = 2 * normal_gradient / (1 + normal_gradient * normal_gradient) UpperCAmelCase : Tuple = (1 - normal_gradient * normal_gradient) / ( 1 + normal_gradient * normal_gradient ) UpperCAmelCase : List[Any] = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient) # to find the next point, solve the simultaeneous equations: # y^2 + 4x^2 = 100 # y - b = m * (x - a) # ==> A x^2 + B x + C = 0 UpperCAmelCase : List[Any] = outgoing_gradient**2 + 4 UpperCAmelCase : List[str] = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x) UpperCAmelCase : List[Any] = (point_y - outgoing_gradient * point_x) ** 2 - 100 UpperCAmelCase : Dict = ( -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) UpperCAmelCase : Optional[int] = ( -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) # two solutions, one of which is our input point UpperCAmelCase : Any = x_minus if isclose(__magic_name__ , __magic_name__ ) else x_plus UpperCAmelCase : Dict = point_y + outgoing_gradient * (next_x - point_x) return next_x, next_y, outgoing_gradient def lowercase ( __magic_name__ = 1.4 , __magic_name__ = -9.6 ): '''simple docstring''' UpperCAmelCase : int = 0 UpperCAmelCase : float = first_x_coord UpperCAmelCase : float = first_y_coord UpperCAmelCase : float = (1_0.1 - point_y) / (0.0 - point_x) while not (-0.0_1 <= point_x <= 0.0_1 and point_y > 0): UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = next_point(__magic_name__ , __magic_name__ , __magic_name__ ) num_reflections += 1 return num_reflections if __name__ == "__main__": print(F'{solution() = }')
311
'''simple docstring''' import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() a : List[str] = logging.get_logger(__name__) a : Optional[Any] = ["model.decoder.embed_positions.weights"] def lowercase ( __magic_name__ ): '''simple docstring''' if "emb" in name: UpperCAmelCase : str = name.replace("emb" , "model.decoder.embed_tokens" ) if "transformer" in name: UpperCAmelCase : List[str] = name.replace("transformer" , "model.decoder" ) if "cross_attention" in name: UpperCAmelCase : int = name.replace("cross_attention" , "encoder_attn" ) if "linear1" in name: UpperCAmelCase : List[Any] = name.replace("linear1" , "fc1" ) if "linear2" in name: UpperCAmelCase : int = name.replace("linear2" , "fc2" ) if "norm1" in name: UpperCAmelCase : Dict = name.replace("norm1" , "self_attn_layer_norm" ) if "norm_cross" in name: UpperCAmelCase : Any = name.replace("norm_cross" , "encoder_attn_layer_norm" ) if "norm2" in name: UpperCAmelCase : Union[str, Any] = name.replace("norm2" , "final_layer_norm" ) if "out_norm" in name: UpperCAmelCase : Dict = name.replace("out_norm" , "model.decoder.layer_norm" ) if "linears" in name: UpperCAmelCase : List[Any] = name.replace("linears" , "lm_heads" ) if "condition_provider.conditioners.description.output_proj" in name: UpperCAmelCase : Any = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" ) return name def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Any = list(state_dict.keys() ) UpperCAmelCase : List[Any] = {} for key in keys: UpperCAmelCase : Any = state_dict.pop(__magic_name__ ) UpperCAmelCase : str = rename_keys(__magic_name__ ) if "in_proj_weight" in key: # split fused qkv proj UpperCAmelCase : Optional[int] = val[:hidden_size, :] UpperCAmelCase : Optional[Any] = val[hidden_size : 2 * hidden_size, :] UpperCAmelCase : Optional[Any] = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: UpperCAmelCase : str = val else: UpperCAmelCase : int = val return state_dict, enc_dec_proj_state_dict def lowercase ( __magic_name__ ): '''simple docstring''' if checkpoint == "small": # default config values UpperCAmelCase : List[Any] = 1024 UpperCAmelCase : Tuple = 24 UpperCAmelCase : Union[str, Any] = 16 elif checkpoint == "medium": UpperCAmelCase : List[Any] = 1536 UpperCAmelCase : Optional[Any] = 48 UpperCAmelCase : List[str] = 24 elif checkpoint == "large": UpperCAmelCase : List[Any] = 2048 UpperCAmelCase : str = 48 UpperCAmelCase : Optional[Any] = 32 else: raise ValueError(F"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." ) UpperCAmelCase : Tuple = MusicgenDecoderConfig( hidden_size=__magic_name__ , ffn_dim=hidden_size * 4 , num_hidden_layers=__magic_name__ , num_attention_heads=__magic_name__ , ) return config @torch.no_grad() def lowercase ( __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__="cpu" ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = MusicGen.get_pretrained(__magic_name__ , device=__magic_name__ ) UpperCAmelCase : List[str] = decoder_config_from_checkpoint(__magic_name__ ) UpperCAmelCase : Dict = fairseq_model.lm.state_dict() UpperCAmelCase , UpperCAmelCase : List[str] = rename_state_dict( __magic_name__ , hidden_size=decoder_config.hidden_size ) UpperCAmelCase : Any = TaEncoderModel.from_pretrained("t5-base" ) UpperCAmelCase : Any = EncodecModel.from_pretrained("facebook/encodec_32khz" ) UpperCAmelCase : int = MusicgenForCausalLM(__magic_name__ ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection UpperCAmelCase , UpperCAmelCase : Optional[int] = decoder.load_state_dict(__magic_name__ , strict=__magic_name__ ) for key in missing_keys.copy(): if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(__magic_name__ ) if len(__magic_name__ ) > 0: raise ValueError(F"Missing key(s) in state_dict: {missing_keys}" ) if len(__magic_name__ ) > 0: raise ValueError(F"Unexpected key(s) in state_dict: {unexpected_keys}" ) # init the composite model UpperCAmelCase : List[Any] = MusicgenForConditionalGeneration(text_encoder=__magic_name__ , audio_encoder=__magic_name__ , decoder=__magic_name__ ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(__magic_name__ ) # check we can do a forward pass UpperCAmelCase : Union[str, Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) UpperCAmelCase : Optional[Any] = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): UpperCAmelCase : str = model(input_ids=__magic_name__ , decoder_input_ids=__magic_name__ ).logits if logits.shape != (8, 1, 2048): raise ValueError("Incorrect shape for logits" ) # now construct the processor UpperCAmelCase : Dict = AutoTokenizer.from_pretrained("t5-base" ) UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" ) UpperCAmelCase : Dict = MusicgenProcessor(feature_extractor=__magic_name__ , tokenizer=__magic_name__ ) # set the appropriate bos/pad token ids UpperCAmelCase : List[Any] = 2048 UpperCAmelCase : Tuple = 2048 # set other default generation config params UpperCAmelCase : Tuple = int(30 * audio_encoder.config.frame_rate ) UpperCAmelCase : str = True UpperCAmelCase : Tuple = 3.0 if pytorch_dump_folder is not None: Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) logger.info(F"Saving model {checkpoint} to {pytorch_dump_folder}" ) model.save_pretrained(__magic_name__ ) processor.save_pretrained(__magic_name__ ) if repo_id: logger.info(F"Pushing model {checkpoint} to {repo_id}" ) model.push_to_hub(__magic_name__ ) processor.push_to_hub(__magic_name__ ) if __name__ == "__main__": a : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint", default="small", type=str, help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.", ) parser.add_argument( "--pytorch_dump_folder", required=True, default=None, type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) parser.add_argument( "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda." ) a : int = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
311
1
'''simple docstring''' import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin a : Dict = get_tests_dir("fixtures/test_sentencepiece.model") a : str = {"target_lang": "fi", "source_lang": "en"} a : Tuple = ">>zh<<" a : int = "Helsinki-NLP/" if is_torch_available(): a : Any = "pt" elif is_tf_available(): a : List[str] = "tf" else: a : Optional[Any] = "jax" @require_sentencepiece class UpperCamelCase__ ( lowercase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = MarianTokenizer SCREAMING_SNAKE_CASE__ : str = False SCREAMING_SNAKE_CASE__ : Tuple = True def A_ ( self ): '''simple docstring''' super().setUp() UpperCAmelCase : List[str] = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"] UpperCAmelCase : str = dict(zip(snake_case , range(len(snake_case ) ) ) ) UpperCAmelCase : Union[str, Any] = Path(self.tmpdirname ) save_json(snake_case , save_dir / VOCAB_FILES_NAMES["vocab"] ) save_json(snake_case , save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(snake_case , save_dir / VOCAB_FILES_NAMES["source_spm"] ) copyfile(snake_case , save_dir / VOCAB_FILES_NAMES["target_spm"] ) UpperCAmelCase : Union[str, Any] = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def A_ ( self , **snake_case ): '''simple docstring''' return MarianTokenizer.from_pretrained(self.tmpdirname , **snake_case ) def A_ ( self , snake_case ): '''simple docstring''' return ( "This is a test", "This is a test", ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[Any] = "</s>" UpperCAmelCase : Optional[int] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case ) , snake_case ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case ) , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "</s>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "<pad>" ) self.assertEqual(len(snake_case ) , 9 ) def A_ ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 9 ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[int] = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de" ) UpperCAmelCase : int = en_de_tokenizer(["I am a small frog"] , return_tensors=snake_case ) self.assertIsInstance(snake_case , snake_case ) UpperCAmelCase : List[Any] = [3_8, 1_2_1, 1_4, 6_9_7, 3_8_8_4_8, 0] self.assertListEqual(snake_case , batch.input_ids[0] ) UpperCAmelCase : Union[str, Any] = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(snake_case ) UpperCAmelCase : Union[str, Any] = [x.name for x in Path(snake_case ).glob("*" )] self.assertIn("source.spm" , snake_case ) MarianTokenizer.from_pretrained(snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = self.get_tokenizer() UpperCAmelCase : Union[str, Any] = tok( ["I am a small frog" * 1_0_0_0, "I am a small frog"] , padding=snake_case , truncation=snake_case , return_tensors=snake_case ) self.assertIsInstance(snake_case , snake_case ) self.assertEqual(batch.input_ids.shape , (2, 5_1_2) ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = self.get_tokenizer() UpperCAmelCase : List[str] = tok(["I am a tiny frog", "I am a small frog"] , padding=snake_case , return_tensors=snake_case ) self.assertIsInstance(snake_case , snake_case ) self.assertEqual(batch_smaller.input_ids.shape , (2, 1_0) ) @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = {"input_ids": [[4_3_4_9_5, 4_6_2, 2_0, 4_2_1_6_4, 1_3_6_9, 5_2, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 7_4_9_1, 3_8_9_9_9, 6, 8, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 4_6_6_9, 3_7_8_6_7, 1_3, 7_5_2_5, 2_7, 1_5_9_3, 9_8_8, 1_3, 3_3_9_7_2, 7_0_2_9, 6, 2_0, 8_2_5_1, 3_8_3, 2, 2_7_0, 5_8_6_6, 3_7_8_8, 2, 2_3_5_3, 8_2_5_1, 1_2_3_3_8, 2, 1_3_9_5_8, 3_8_7, 2, 3_6_2_9, 6_9_5_3, 1_8_8, 2_9_0_0, 2, 1_3_9_5_8, 8_0_1_1, 1_1_5_0_1, 2_3, 8_4_6_0, 4_0_7_3, 3_4_0_0_9, 2_0, 4_3_5, 1_1_4_3_9, 2_7, 8, 8_4_6_0, 4_0_7_3, 6_0_0_4, 2_0, 9_9_8_8, 3_7_5, 2_7, 3_3, 2_6_6, 1_9_4_5, 1_0_7_6, 1_3_5_0, 3_7_8_6_7, 3_2_8_8, 5, 5_7_7, 1_0_7_6, 4_3_7_4, 8, 5_0_8_2, 5, 2_6_4_5_3, 2_5_7, 5_5_6, 4_0_3, 2, 2_4_2, 1_3_2, 3_8_3, 3_1_6, 4_9_2, 8, 1_0_7_6_7, 6, 3_1_6, 3_0_4, 4_2_3_9, 3, 0], [1_4_8, 1_5_7_2_2, 1_9, 1_8_3_9, 1_2, 1_3_5_0, 1_3, 2_2_3_2_7, 5_0_8_2, 5_4_1_8, 4_7_5_6_7, 3_5_9_3_8, 5_9, 3_1_8, 1_9_5_5_2, 1_0_8, 2_1_8_3, 5_4, 1_4_9_7_6, 4_8_3_5, 3_2, 5_4_7, 1_1_1_4, 8, 3_1_5, 2_4_1_7, 5, 9_2, 1_9_0_8_8, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0], [3_6, 6_3_9_5, 1_2_5_7_0, 3_9_1_4_7, 1_1_5_9_7, 6, 2_6_6, 4, 4_5_4_0_5, 7_2_9_6, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=snake_case , model_name="Helsinki-NLP/opus-mt-en-de" , revision="1a8c2263da11e68e50938f97e10cd57820bd504c" , decode_kwargs={"use_source_tokenizer": True} , ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : int = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs" ) UpperCAmelCase : Optional[Any] = "Tämä on testi" UpperCAmelCase : List[Any] = "This is a test" UpperCAmelCase : Tuple = [7_6, 7, 2_0_4_7, 2] UpperCAmelCase : int = [6_9, 1_2, 1_1, 9_4_0, 2] UpperCAmelCase : Any = tokenizer(snake_case ).input_ids self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : List[str] = tokenizer(text_target=snake_case ).input_ids self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : str = tokenizer.decode(snake_case , skip_special_tokens=snake_case ) self.assertEqual(snake_case , snake_case )
311
'''simple docstring''' import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[str] = inspect.getfile(accelerate.test_utils ) UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] ) UpperCAmelCase : Optional[int] = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] ) UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] ) @require_multi_gpu def A_ ( self ): '''simple docstring''' print(f"Found {torch.cuda.device_count()} devices." ) UpperCAmelCase : Any = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case , env=os.environ.copy() ) @require_multi_gpu def A_ ( self ): '''simple docstring''' print(f"Found {torch.cuda.device_count()} devices." ) UpperCAmelCase : Tuple = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path] print(f"Command: {cmd}" ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case , env=os.environ.copy() ) @require_multi_gpu def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case , env=os.environ.copy() ) @require_multi_gpu def A_ ( self ): '''simple docstring''' print(f"Found {torch.cuda.device_count()} devices, using 2 devices only" ) UpperCAmelCase : str = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ): execute_subprocess_async(snake_case , env=os.environ.copy() ) if __name__ == "__main__": a : Union[str, Any] = Accelerator() a : str = (accelerator.state.process_index + 2, 10) a : List[str] = torch.randint(0, 10, shape).to(accelerator.device) a : Optional[int] = "" a : int = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." a : List[Any] = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." a : List[str] = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
311
1
'''simple docstring''' import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase : Optional[Any] = image.size UpperCAmelCase , UpperCAmelCase : Union[str, Any] = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 UpperCAmelCase : int = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) UpperCAmelCase : Tuple = np.array(__magic_name__ ).astype(np.floataa ) / 2_5_5.0 UpperCAmelCase : Tuple = image[None].transpose(0 , 3 , 1 , 2 ) UpperCAmelCase : Optional[Any] = torch.from_numpy(__magic_name__ ) return 2.0 * image - 1.0 class UpperCamelCase__ ( lowercase__ ): """simple docstring""" def __init__( self , snake_case , snake_case , snake_case , ): '''simple docstring''' super().__init__() self.register_modules(vqvae=snake_case , unet=snake_case , scheduler=snake_case ) @torch.no_grad() def __call__( self , snake_case = None , snake_case = 1 , snake_case = 1_0_0 , snake_case = 0.0 , snake_case = None , snake_case = "pil" , snake_case = True , ): '''simple docstring''' if isinstance(snake_case , PIL.Image.Image ): UpperCAmelCase : str = 1 elif isinstance(snake_case , torch.Tensor ): UpperCAmelCase : Optional[int] = image.shape[0] else: raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(snake_case )}" ) if isinstance(snake_case , PIL.Image.Image ): UpperCAmelCase : Union[str, Any] = preprocess(snake_case ) UpperCAmelCase , UpperCAmelCase : Union[str, Any] = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image UpperCAmelCase : Optional[Any] = (batch_size, self.unet.config.in_channels // 2, height, width) UpperCAmelCase : Optional[int] = next(self.unet.parameters() ).dtype UpperCAmelCase : Any = randn_tensor(snake_case , generator=snake_case , device=self.device , dtype=snake_case ) UpperCAmelCase : Optional[Any] = image.to(device=self.device , dtype=snake_case ) # set timesteps and move to the correct device self.scheduler.set_timesteps(snake_case , device=self.device ) UpperCAmelCase : Union[str, Any] = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler UpperCAmelCase : List[str] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] UpperCAmelCase : Optional[int] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) UpperCAmelCase : Optional[Any] = {} if accepts_eta: UpperCAmelCase : Tuple = eta for t in self.progress_bar(snake_case ): # concat latents and low resolution image in the channel dimension. UpperCAmelCase : str = torch.cat([latents, image] , dim=1 ) UpperCAmelCase : Optional[Any] = self.scheduler.scale_model_input(snake_case , snake_case ) # predict the noise residual UpperCAmelCase : Tuple = self.unet(snake_case , snake_case ).sample # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase : str = self.scheduler.step(snake_case , snake_case , snake_case , **snake_case ).prev_sample # decode the image latents with the VQVAE UpperCAmelCase : int = self.vqvae.decode(snake_case ).sample UpperCAmelCase : Union[str, Any] = torch.clamp(snake_case , -1.0 , 1.0 ) UpperCAmelCase : Dict = image / 2 + 0.5 UpperCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCAmelCase : List[Any] = self.numpy_to_pil(snake_case ) if not return_dict: return (image,) return ImagePipelineOutput(images=snake_case )
311
'''simple docstring''' import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class UpperCamelCase__ : """simple docstring""" @staticmethod def A_ ( *snake_case , **snake_case ): '''simple docstring''' pass @is_pipeline_test @require_vision @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def A_ ( self , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : str = pipeline( "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" ) UpperCAmelCase : Union[str, Any] = [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ] return object_detector, examples def A_ ( self , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : List[Any] = object_detector(examples[0] , threshold=0.0 ) UpperCAmelCase : Dict = len(snake_case ) self.assertGreater(snake_case , 0 ) self.assertEqual( snake_case , [ { "score": ANY(snake_case ), "label": ANY(snake_case ), "box": {"xmin": ANY(snake_case ), "ymin": ANY(snake_case ), "xmax": ANY(snake_case ), "ymax": ANY(snake_case )}, } for i in range(snake_case ) ] , ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF" ) def A_ ( self ): '''simple docstring''' pass @require_torch def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = pipeline( "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" ) UpperCAmelCase : Optional[Any] = object_detector( "./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ {"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}}, {"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}}, {"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}}, ] , ) UpperCAmelCase : Tuple = object_detector( [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ] , threshold=0.64 , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ [ {"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}}, {"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}}, {"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}}, ] ] , ) @require_torch @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = pipeline("zero-shot-object-detection" ) UpperCAmelCase : Optional[int] = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}}, ] , ) UpperCAmelCase : Union[str, Any] = object_detector( [ { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, ] , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}}, ], [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}}, ], ] , ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF" ) def A_ ( self ): '''simple docstring''' pass @require_torch @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = 0.2 UpperCAmelCase : Union[str, Any] = pipeline("zero-shot-object-detection" ) UpperCAmelCase : str = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=snake_case , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}}, ] , ) @require_torch @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = 2 UpperCAmelCase : Optional[Any] = pipeline("zero-shot-object-detection" ) UpperCAmelCase : List[str] = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=snake_case , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, ] , )
311
1
'''simple docstring''' from collections import defaultdict class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : List[str] = total # total no of tasks (N) # DP table will have a dimension of (2^M)*N # initially all values are set to -1 UpperCAmelCase : Tuple = [ [-1 for i in range(total + 1 )] for j in range(2 ** len(snake_case ) ) ] UpperCAmelCase : Union[str, Any] = defaultdict(snake_case ) # stores the list of persons for each task # final_mask is used to check if all persons are included by setting all bits # to 1 UpperCAmelCase : Tuple = (1 << len(snake_case )) - 1 def A_ ( self , snake_case , snake_case ): '''simple docstring''' if mask == self.final_mask: return 1 # if not everyone gets the task and no more tasks are available, return 0 if task_no > self.total_tasks: return 0 # if case already considered if self.dp[mask][task_no] != -1: return self.dp[mask][task_no] # Number of ways when we don't this task in the arrangement UpperCAmelCase : List[Any] = self.count_ways_until(snake_case , task_no + 1 ) # now assign the tasks one by one to all possible persons and recursively # assign for the remaining tasks. if task_no in self.task: for p in self.task[task_no]: # if p is already given a task if mask & (1 << p): continue # assign this task to p and change the mask value. And recursively # assign tasks with the new mask value. total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 ) # save the value. UpperCAmelCase : Optional[int] = total_ways_util return self.dp[mask][task_no] def A_ ( self , snake_case ): '''simple docstring''' for i in range(len(snake_case ) ): for j in task_performed[i]: self.task[j].append(snake_case ) # call the function to fill the DP table, final answer is stored in dp[0][1] return self.count_ways_until(0 , 1 ) if __name__ == "__main__": a : Dict = 5 # total no of tasks (the value of N) # the list of tasks that can be done by M persons. a : Any = [[1, 3, 4], [1, 2, 5], [3, 4]] print( AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways( task_performed ) )
311
'''simple docstring''' def lowercase ( __magic_name__ ): '''simple docstring''' if number > 0: raise ValueError("input must be a negative integer" ) UpperCAmelCase : List[Any] = len(bin(__magic_name__ )[3:] ) UpperCAmelCase : Optional[Any] = bin(abs(__magic_name__ ) - (1 << binary_number_length) )[3:] UpperCAmelCase : Tuple = ( ( "1" + "0" * (binary_number_length - len(__magic_name__ )) + twos_complement_number ) if number < 0 else "0" ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
311
1
'''simple docstring''' import csv import tweepy # Twitter API credentials a : Dict = "" a : List[str] = "" a : List[str] = "" a : Tuple = "" def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[str] = tweepy.OAuthHandler(__magic_name__ , __magic_name__ ) auth.set_access_token(__magic_name__ , __magic_name__ ) UpperCAmelCase : List[str] = tweepy.API(__magic_name__ ) # initialize a list to hold all the tweepy Tweets UpperCAmelCase : List[Any] = [] # make initial request for most recent tweets (200 is the maximum allowed count) UpperCAmelCase : int = api.user_timeline(screen_name=__magic_name__ , count=200 ) # save most recent tweets alltweets.extend(__magic_name__ ) # save the id of the oldest tweet less one UpperCAmelCase : str = alltweets[-1].id - 1 # keep grabbing tweets until there are no tweets left to grab while len(__magic_name__ ) > 0: print(F"getting tweets before {oldest}" ) # all subsequent requests use the max_id param to prevent duplicates UpperCAmelCase : str = api.user_timeline( screen_name=__magic_name__ , count=200 , max_id=__magic_name__ ) # save most recent tweets alltweets.extend(__magic_name__ ) # update the id of the oldest tweet less one UpperCAmelCase : Dict = alltweets[-1].id - 1 print(F"...{len(__magic_name__ )} tweets downloaded so far" ) # transform the tweepy tweets into a 2D array that will populate the csv UpperCAmelCase : Tuple = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets] # write the csv with open(F"new_{screen_name}_tweets.csv" , "w" ) as f: UpperCAmelCase : Optional[int] = csv.writer(__magic_name__ ) writer.writerow(["id", "created_at", "text"] ) writer.writerows(__magic_name__ ) if __name__ == "__main__": # pass in the username of the account you want to download get_all_tweets("FirePing32")
311
'''simple docstring''' from collections import Counter import numpy as np from sklearn import datasets from sklearn.model_selection import train_test_split a : int = datasets.load_iris() a : Union[str, Any] = np.array(data["data"]) a : Optional[Any] = np.array(data["target"]) a : List[Any] = data["target_names"] a , a , a , a : Dict = train_test_split(X, y) def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' return np.linalg.norm(np.array(__magic_name__ ) - np.array(__magic_name__ ) ) def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=5 ): '''simple docstring''' UpperCAmelCase : int = zip(__magic_name__ , __magic_name__ ) # List of distances of all points from the point to be classified UpperCAmelCase : List[Any] = [] for data_point in data: UpperCAmelCase : List[str] = euclidean_distance(data_point[0] , __magic_name__ ) distances.append((distance, data_point[1]) ) # Choosing 'k' points with the least distances. UpperCAmelCase : Union[str, Any] = [i[1] for i in sorted(__magic_name__ )[:k]] # Most commonly occurring class among them # is the class into which the point is classified UpperCAmelCase : List[str] = Counter(__magic_name__ ).most_common(1 )[0][0] return classes[result] if __name__ == "__main__": print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
311
1
'''simple docstring''' import argparse import os from . import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPTaConfig, LayoutLMConfig, LxmertConfig, OpenAIGPTConfig, RobertaConfig, TaConfig, TFAlbertForPreTraining, TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPTaLMHeadModel, TFLayoutLMForMaskedLM, TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFTaForConditionalGeneration, TFTransfoXLLMHeadModel, TFWavaVecaModel, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel, TransfoXLConfig, WavaVecaConfig, WavaVecaModel, XLMConfig, XLMRobertaConfig, XLNetConfig, is_torch_available, load_pytorch_checkpoint_in_tfa_model, ) from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging if is_torch_available(): import numpy as np import torch from . import ( AlbertForPreTraining, BartForConditionalGeneration, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, CamembertForMaskedLM, CTRLLMHeadModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DPRContextEncoder, DPRQuestionEncoder, DPRReader, ElectraForPreTraining, FlaubertWithLMHeadModel, GPTaLMHeadModel, LayoutLMForMaskedLM, LxmertForPreTraining, LxmertVisualFeatureEncoder, OpenAIGPTLMHeadModel, RobertaForMaskedLM, RobertaForSequenceClassification, TaForConditionalGeneration, TransfoXLLMHeadModel, XLMRobertaForMaskedLM, XLMWithLMHeadModel, XLNetLMHeadModel, ) logging.set_verbosity_info() a : Tuple = { "bart": ( BartConfig, TFBartForConditionalGeneration, TFBartForSequenceClassification, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST, ), "bert": ( BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "bert-large-uncased-whole-word-masking-finetuned-squad": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "bert-large-cased-whole-word-masking-finetuned-squad": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "bert-base-cased-finetuned-mrpc": ( BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "dpr": ( DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ), "gpt2": ( GPTaConfig, TFGPTaLMHeadModel, GPTaLMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "xlnet": ( XLNetConfig, TFXLNetLMHeadModel, XLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "xlm": ( XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "xlm-roberta": ( XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "transfo-xl": ( TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "openai-gpt": ( OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "roberta": ( RobertaConfig, TFRobertaForCausalLM, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "layoutlm": ( LayoutLMConfig, TFLayoutLMForMaskedLM, LayoutLMForMaskedLM, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, ), "roberta-large-mnli": ( RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "camembert": ( CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "flaubert": ( FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "distilbert": ( DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "distilbert-base-distilled-squad": ( DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "lxmert": ( LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "lxmert-visual-feature-encoder": ( LxmertConfig, TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "ctrl": ( CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "albert": ( AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "t5": ( TaConfig, TFTaForConditionalGeneration, TaForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "electra": ( ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "wav2vec2": ( WavaVecaConfig, TFWavaVecaModel, WavaVecaModel, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), } def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=False , __magic_name__=True ): '''simple docstring''' if model_type not in MODEL_CLASSES: raise ValueError(F"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}." ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = MODEL_CLASSES[model_type] # Initialise TF model if config_file in aws_config_map: UpperCAmelCase : int = cached_file(__magic_name__ , __magic_name__ , force_download=not use_cached_models ) UpperCAmelCase : Optional[Any] = config_class.from_json_file(__magic_name__ ) UpperCAmelCase : List[Any] = True UpperCAmelCase : Tuple = True print(F"Building TensorFlow model from configuration: {config}" ) UpperCAmelCase : int = model_class(__magic_name__ ) # Load weights from tf checkpoint if pytorch_checkpoint_path in aws_config_map.keys(): UpperCAmelCase : Union[str, Any] = cached_file( __magic_name__ , __magic_name__ , force_download=not use_cached_models ) # Load PyTorch checkpoint in tf2 model: UpperCAmelCase : Any = load_pytorch_checkpoint_in_tfa_model(__magic_name__ , __magic_name__ ) if compare_with_pt_model: UpperCAmelCase : int = tf_model(tf_model.dummy_inputs , training=__magic_name__ ) # build the network UpperCAmelCase : Dict = torch.load(__magic_name__ , map_location="cpu" ) UpperCAmelCase : str = pt_model_class.from_pretrained( pretrained_model_name_or_path=__magic_name__ , config=__magic_name__ , state_dict=__magic_name__ ) with torch.no_grad(): UpperCAmelCase : Optional[int] = pt_model(**pt_model.dummy_inputs ) UpperCAmelCase : str = pto[0].numpy() UpperCAmelCase : str = tfo[0].numpy() UpperCAmelCase : Any = np.amax(np.abs(np_pt - np_tf ) ) print(F"Max absolute difference between models outputs {diff}" ) assert diff <= 2e-2, F"Error, model absolute difference is >2e-2: {diff}" # Save pytorch-model print(F"Save TensorFlow model to {tf_dump_path}" ) tf_model.save_weights(__magic_name__ , save_format="h5" ) def lowercase ( __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=False , __magic_name__=False , __magic_name__=False , __magic_name__=False , ): '''simple docstring''' if args_model_type is None: UpperCAmelCase : Optional[Any] = list(MODEL_CLASSES.keys() ) else: UpperCAmelCase : int = [args_model_type] for j, model_type in enumerate(__magic_name__ , start=1 ): print("=" * 100 ) print(F" Converting model type {j}/{len(__magic_name__ )}: {model_type}" ) print("=" * 100 ) if model_type not in MODEL_CLASSES: raise ValueError(F"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}." ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = MODEL_CLASSES[model_type] if model_shortcut_names_or_path is None: UpperCAmelCase : Dict = list(aws_model_maps.keys() ) if config_shortcut_names_or_path is None: UpperCAmelCase : str = model_shortcut_names_or_path for i, (model_shortcut_name, config_shortcut_name) in enumerate( zip(__magic_name__ , __magic_name__ ) , start=1 ): print("-" * 100 ) if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: if not only_convert_finetuned_models: print(F" Skipping finetuned checkpoint {model_shortcut_name}" ) continue UpperCAmelCase : Optional[int] = model_shortcut_name elif only_convert_finetuned_models: print(F" Skipping not finetuned checkpoint {model_shortcut_name}" ) continue print( F" Converting checkpoint {i}/{len(__magic_name__ )}: {model_shortcut_name} - model_type {model_type}" ) print("-" * 100 ) if config_shortcut_name in aws_config_map: UpperCAmelCase : Dict = cached_file(__magic_name__ , __magic_name__ , force_download=not use_cached_models ) else: UpperCAmelCase : Union[str, Any] = config_shortcut_name if model_shortcut_name in aws_model_maps: UpperCAmelCase : Optional[Any] = cached_file(__magic_name__ , __magic_name__ , force_download=not use_cached_models ) else: UpperCAmelCase : str = model_shortcut_name if os.path.isfile(__magic_name__ ): UpperCAmelCase : List[str] = "converted_model" convert_pt_checkpoint_to_tf( model_type=__magic_name__ , pytorch_checkpoint_path=__magic_name__ , config_file=__magic_name__ , tf_dump_path=os.path.join(__magic_name__ , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=__magic_name__ , ) if remove_cached_files: os.remove(__magic_name__ ) os.remove(__magic_name__ ) if __name__ == "__main__": a : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file." ) parser.add_argument( "--model_type", default=None, type=str, help=( F'Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and ' "convert all the models from AWS." ), ) parser.add_argument( "--pytorch_checkpoint_path", default=None, type=str, help=( "Path to the PyTorch checkpoint path or shortcut name to download from AWS. " "If not given, will download and convert all the checkpoints from AWS." ), ) parser.add_argument( "--config_file", default=None, type=str, help=( "The config json file corresponding to the pre-trained model. \n" "This specifies the model architecture. If not given and " "--pytorch_checkpoint_path is not given or is a shortcut name " "use the configuration associated to the shortcut name on the AWS" ), ) parser.add_argument( "--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions." ) parser.add_argument( "--use_cached_models", action="store_true", help="Use cached models if possible instead of updating to latest checkpoint versions.", ) parser.add_argument( "--remove_cached_files", action="store_true", help="Remove pytorch models after conversion (save memory when converting in batches).", ) parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.") a : Tuple = parser.parse_args() # if args.pytorch_checkpoint_path is not None: # convert_pt_checkpoint_to_tf(args.model_type.lower(), # args.pytorch_checkpoint_path, # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path, # args.tf_dump_path, # compare_with_pt_model=args.compare_with_pt_model, # use_cached_models=args.use_cached_models) # else: convert_all_pt_checkpoints_to_tf( args.model_type.lower() if args.model_type is not None else None, args.tf_dump_path, model_shortcut_names_or_path=[args.pytorch_checkpoint_path] if args.pytorch_checkpoint_path is not None else None, config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, compare_with_pt_model=args.compare_with_pt_model, use_cached_models=args.use_cached_models, remove_cached_files=args.remove_cached_files, only_convert_finetuned_models=args.only_convert_finetuned_models, )
311
'''simple docstring''' def lowercase ( __magic_name__ ): '''simple docstring''' if number < 0: raise ValueError("number must not be negative" ) return number & (number - 1) == 0 if __name__ == "__main__": import doctest doctest.testmod()
311
1
'''simple docstring''' from __future__ import annotations def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[str] = str(__magic_name__ ) return n == n[::-1] def lowercase ( __magic_name__ = 100_0000 ): '''simple docstring''' UpperCAmelCase : Tuple = 0 for i in range(1 , __magic_name__ ): if is_palindrome(__magic_name__ ) and is_palindrome(bin(__magic_name__ ).split("b" )[1] ): total += i return total if __name__ == "__main__": print(solution(int(str(input().strip()))))
311
'''simple docstring''' import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def lowercase ( __magic_name__ , __magic_name__=10 ): '''simple docstring''' UpperCAmelCase : Tuple = [] for _ in range(__magic_name__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def lowercase ( __magic_name__ , __magic_name__=10 ): '''simple docstring''' UpperCAmelCase : List[str] = [] for step in range(__magic_name__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase : Any = os.path.join(__magic_name__ , "schedule.bin" ) torch.save(scheduler.state_dict() , __magic_name__ ) UpperCAmelCase : Any = torch.load(__magic_name__ ) scheduler.load_state_dict(__magic_name__ ) return lrs @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self , snake_case , snake_case , snake_case ): '''simple docstring''' self.assertEqual(len(snake_case ) , len(snake_case ) ) for a, b in zip(snake_case , snake_case ): self.assertAlmostEqual(snake_case , snake_case , delta=snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case ) UpperCAmelCase : Any = torch.tensor([0.4, 0.2, -0.5] ) UpperCAmelCase : Any = nn.MSELoss() # No warmup, constant schedule, no gradient clipping UpperCAmelCase : List[str] = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 ) for _ in range(1_0_0 ): UpperCAmelCase : List[Any] = criterion(snake_case , snake_case ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case ) UpperCAmelCase : int = torch.tensor([0.4, 0.2, -0.5] ) UpperCAmelCase : str = nn.MSELoss() # No warmup, constant schedule, no gradient clipping UpperCAmelCase : str = Adafactor( params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=snake_case , weight_decay=0.0 , relative_step=snake_case , scale_parameter=snake_case , warmup_init=snake_case , ) for _ in range(1_0_0_0 ): UpperCAmelCase : str = criterion(snake_case , snake_case ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = nn.Linear(50 , 50 ) if is_torch_available() else None SCREAMING_SNAKE_CASE__ : List[Any] = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None SCREAMING_SNAKE_CASE__ : Optional[int] = 10 def A_ ( self , snake_case , snake_case , snake_case , snake_case=None ): '''simple docstring''' self.assertEqual(len(snake_case ) , len(snake_case ) ) for a, b in zip(snake_case , snake_case ): self.assertAlmostEqual(snake_case , snake_case , delta=snake_case , msg=snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : int = {"num_warmup_steps": 2, "num_training_steps": 1_0} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) UpperCAmelCase : int = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {"num_warmup_steps": 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, "num_cycles": 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, "power": 2.0, "lr_end": 1e-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {"num_warmup_steps": 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): UpperCAmelCase , UpperCAmelCase : Any = data UpperCAmelCase : Tuple = scheduler_func(self.optimizer , **snake_case ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) UpperCAmelCase : List[str] = unwrap_schedule(snake_case , self.num_steps ) self.assertListAlmostEqual( snake_case , snake_case , tol=1e-2 , msg=f"failed for {scheduler_func} in normal scheduler" , ) UpperCAmelCase : Optional[Any] = scheduler_func(self.optimizer , **snake_case ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(snake_case ) # wrap to test picklability of the schedule UpperCAmelCase : Tuple = unwrap_and_save_reload_schedule(snake_case , self.num_steps ) self.assertListEqual(snake_case , snake_case , msg=f"failed for {scheduler_func} in save and reload" ) class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case ): '''simple docstring''' UpperCAmelCase : List[str] = fn def __call__( self , *snake_case , **snake_case ): '''simple docstring''' return self.fn(*snake_case , **snake_case ) @classmethod def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : Optional[int] = list(map(self , scheduler.lr_lambdas ) )
311
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) a : Union[str, Any] = { "configuration_encodec": [ "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP", "EncodecConfig", ], "feature_extraction_encodec": ["EncodecFeatureExtractor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Optional[int] = [ "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST", "EncodecModel", "EncodecPreTrainedModel", ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys a : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
311
'''simple docstring''' import jax.numpy as jnp from ...utils import logging from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel from .configuration_mta import MTaConfig a : Optional[Any] = logging.get_logger(__name__) a : Tuple = "T5Config" def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Any = jnp.zeros_like(__magic_name__ ) UpperCAmelCase : Optional[int] = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] ) UpperCAmelCase : str = shifted_input_ids.at[:, 0].set(__magic_name__ ) UpperCAmelCase : Any = jnp.where(shifted_input_ids == -100 , __magic_name__ , __magic_name__ ) return shifted_input_ids class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : int = "mt5" SCREAMING_SNAKE_CASE__ : Dict = MTaConfig class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : int = "mt5" SCREAMING_SNAKE_CASE__ : str = MTaConfig class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = "mt5" SCREAMING_SNAKE_CASE__ : str = MTaConfig
311
1
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging a : Optional[int] = logging.get_logger(__name__) a : str = { "google/pix2struct-textcaps-base": ( "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json" ), } class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : str = "pix2struct_text_model" SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["past_key_values"] SCREAMING_SNAKE_CASE__ : int = { "hidden_size": "hidden_size", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self , snake_case=5_0_2_4_4 , snake_case=7_6_8 , snake_case=6_4 , snake_case=2_0_4_8 , snake_case=1_2 , snake_case=1_2 , snake_case=3_2 , snake_case=1_2_8 , snake_case=0.1 , snake_case=1e-6 , snake_case=1.0 , snake_case="gelu_new" , snake_case=0 , snake_case=False , snake_case=0 , snake_case=1 , snake_case=False , snake_case=True , **snake_case , ): '''simple docstring''' UpperCAmelCase : Optional[int] = vocab_size UpperCAmelCase : Union[str, Any] = hidden_size UpperCAmelCase : Optional[int] = d_kv UpperCAmelCase : Tuple = d_ff UpperCAmelCase : Union[str, Any] = num_layers UpperCAmelCase : Optional[Any] = num_heads UpperCAmelCase : Tuple = relative_attention_num_buckets UpperCAmelCase : Optional[Any] = relative_attention_max_distance UpperCAmelCase : Optional[Any] = dropout_rate UpperCAmelCase : List[str] = layer_norm_epsilon UpperCAmelCase : Tuple = initializer_factor UpperCAmelCase : Optional[Any] = use_cache UpperCAmelCase : int = eos_token_id UpperCAmelCase : Union[str, Any] = decoder_start_token_id # for backwards compatibility UpperCAmelCase : Union[str, Any] = dense_act_fn super().__init__( pad_token_id=snake_case , eos_token_id=snake_case , decoder_start_token_id=snake_case , tie_word_embeddings=snake_case , is_decoder=snake_case , **snake_case , ) @classmethod def A_ ( cls , snake_case , **snake_case ): '''simple docstring''' cls._set_token_in_kwargs(snake_case ) UpperCAmelCase , UpperCAmelCase : int = cls.get_config_dict(snake_case , **snake_case ) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get("model_type" ) == "pix2struct": UpperCAmelCase : Dict = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(snake_case , **snake_case ) class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = "pix2struct_vision_model" def __init__( self , snake_case=7_6_8 , snake_case=7_6_8 , snake_case=2_0_4_8 , snake_case=6_4 , snake_case=1_2 , snake_case=1_2 , snake_case="gelu_new" , snake_case=1e-6 , snake_case=0.0 , snake_case=0.0 , snake_case=1e-10 , snake_case=1.0 , snake_case=4_0_9_6 , snake_case=3_2 , snake_case=1_2_8 , **snake_case , ): '''simple docstring''' super().__init__(**snake_case ) UpperCAmelCase : Union[str, Any] = hidden_size UpperCAmelCase : Union[str, Any] = patch_embed_hidden_size UpperCAmelCase : int = d_ff UpperCAmelCase : Tuple = dropout_rate UpperCAmelCase : Optional[Any] = num_hidden_layers UpperCAmelCase : str = num_attention_heads UpperCAmelCase : str = initializer_range UpperCAmelCase : Tuple = initializer_factor UpperCAmelCase : Any = attention_dropout UpperCAmelCase : Optional[Any] = layer_norm_eps UpperCAmelCase : int = dense_act_fn UpperCAmelCase : Dict = seq_len UpperCAmelCase : Optional[int] = relative_attention_num_buckets UpperCAmelCase : List[Any] = relative_attention_max_distance UpperCAmelCase : Dict = d_kv @classmethod def A_ ( cls , snake_case , **snake_case ): '''simple docstring''' cls._set_token_in_kwargs(snake_case ) UpperCAmelCase , UpperCAmelCase : str = cls.get_config_dict(snake_case , **snake_case ) # get the vision config dict if we are loading from Pix2StructConfig if config_dict.get("model_type" ) == "pix2struct": UpperCAmelCase : Union[str, Any] = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(snake_case , **snake_case ) class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : int = "pix2struct" SCREAMING_SNAKE_CASE__ : Any = True def __init__( self , snake_case=None , snake_case=None , snake_case=1.0 , snake_case=0.02 , snake_case=False , snake_case=False , snake_case=True , **snake_case , ): '''simple docstring''' super().__init__(tie_word_embeddings=snake_case , is_encoder_decoder=snake_case , **snake_case ) if text_config is None: UpperCAmelCase : Optional[Any] = {} logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." ) if vision_config is None: UpperCAmelCase : Union[str, Any] = {} logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." ) UpperCAmelCase : str = PixaStructTextConfig(**snake_case ) UpperCAmelCase : List[Any] = PixaStructVisionConfig(**snake_case ) UpperCAmelCase : List[str] = self.text_config.decoder_start_token_id UpperCAmelCase : Any = self.text_config.pad_token_id UpperCAmelCase : str = self.text_config.eos_token_id UpperCAmelCase : Dict = initializer_factor UpperCAmelCase : str = initializer_range UpperCAmelCase : Optional[Any] = self.initializer_range UpperCAmelCase : Union[str, Any] = self.initializer_range UpperCAmelCase : List[str] = is_vqa @classmethod def A_ ( cls , snake_case , snake_case , **snake_case ): '''simple docstring''' return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ ) UpperCAmelCase : Union[str, Any] = self.text_config.to_dict() UpperCAmelCase : List[Any] = self.vision_config.to_dict() UpperCAmelCase : Any = self.__class__.model_type return output
311
'''simple docstring''' from jiwer import compute_measures import datasets a : List[Any] = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n" a : str = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n" a : Union[str, Any] = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase__ ( datasets.Metric ): """simple docstring""" def A_ ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[ "https://en.wikipedia.org/wiki/Word_error_rate", ] , ) def A_ ( self , snake_case=None , snake_case=None , snake_case=False ): '''simple docstring''' if concatenate_texts: return compute_measures(snake_case , snake_case )["wer"] else: UpperCAmelCase : Dict = 0 UpperCAmelCase : Optional[Any] = 0 for prediction, reference in zip(snake_case , snake_case ): UpperCAmelCase : Tuple = compute_measures(snake_case , snake_case ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
311
1
'''simple docstring''' import argparse import logging import os import sys import numpy as np import onnxruntime import torch from bart_onnx.generation_onnx import BARTBeamSearchGenerator from bart_onnx.reduce_onnx_size import remove_dup_initializers import transformers from transformers import BartForConditionalGeneration, BartTokenizer logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) a : Dict = logging.getLogger(__name__) a : Tuple = {"facebook/bart-base": BartForConditionalGeneration} a : Optional[Any] = {"facebook/bart-base": BartTokenizer} def lowercase ( ): '''simple docstring''' UpperCAmelCase : Dict = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph." ) parser.add_argument( "--validation_file" , type=__magic_name__ , default=__magic_name__ , help="A csv or a json file containing the validation data." ) parser.add_argument( "--max_length" , type=__magic_name__ , default=5 , help="The maximum total input sequence length after tokenization." , ) parser.add_argument( "--num_beams" , type=__magic_name__ , default=__magic_name__ , help=( "Number of beams to use for evaluation. This argument will be " "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``." ) , ) parser.add_argument( "--model_name_or_path" , type=__magic_name__ , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__magic_name__ , ) parser.add_argument( "--config_name" , type=__magic_name__ , default=__magic_name__ , help="Pretrained config name or path if not the same as model_name" , ) parser.add_argument( "--device" , type=__magic_name__ , default="cpu" , help="Device where the model will be run" , ) parser.add_argument("--output_file_path" , type=__magic_name__ , default=__magic_name__ , help="Where to store the final ONNX file." ) UpperCAmelCase : str = parser.parse_args() return args def lowercase ( __magic_name__ , __magic_name__="cpu" ): '''simple docstring''' UpperCAmelCase : str = model_dict[model_name].from_pretrained(__magic_name__ ).to(__magic_name__ ) UpperCAmelCase : Tuple = tokenizer_dict[model_name].from_pretrained(__magic_name__ ) if model_name in ["facebook/bart-base"]: UpperCAmelCase : Optional[int] = 0 UpperCAmelCase : List[str] = None UpperCAmelCase : Optional[int] = 0 return huggingface_model, tokenizer def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' model.eval() UpperCAmelCase : List[str] = None UpperCAmelCase : Dict = torch.jit.script(BARTBeamSearchGenerator(__magic_name__ ) ) with torch.no_grad(): UpperCAmelCase : Any = "My friends are cool but they eat too many carbs." UpperCAmelCase : Dict = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors="pt" ).to(model.device ) UpperCAmelCase : int = model.generate( inputs["input_ids"] , attention_mask=inputs["attention_mask"] , num_beams=__magic_name__ , max_length=__magic_name__ , early_stopping=__magic_name__ , decoder_start_token_id=model.config.decoder_start_token_id , ) torch.onnx.export( __magic_name__ , ( inputs["input_ids"], inputs["attention_mask"], num_beams, max_length, model.config.decoder_start_token_id, ) , __magic_name__ , opset_version=14 , input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"] , output_names=["output_ids"] , dynamic_axes={ "input_ids": {0: "batch", 1: "seq"}, "output_ids": {0: "batch", 1: "seq_out"}, } , example_outputs=__magic_name__ , ) logger.info("Model exported to {}".format(__magic_name__ ) ) UpperCAmelCase : Any = remove_dup_initializers(os.path.abspath(__magic_name__ ) ) logger.info("Deduplicated and optimized model written to {}".format(__magic_name__ ) ) UpperCAmelCase : List[str] = onnxruntime.InferenceSession(__magic_name__ ) UpperCAmelCase : Union[str, Any] = ort_sess.run( __magic_name__ , { "input_ids": inputs["input_ids"].cpu().numpy(), "attention_mask": inputs["attention_mask"].cpu().numpy(), "num_beams": np.array(__magic_name__ ), "max_length": np.array(__magic_name__ ), "decoder_start_token_id": np.array(model.config.decoder_start_token_id ), } , ) np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 ) logger.info("Model outputs from torch and ONNX Runtime are similar." ) logger.info("Success." ) def lowercase ( ): '''simple docstring''' UpperCAmelCase : Optional[Any] = parse_args() UpperCAmelCase : Any = 5 UpperCAmelCase : Optional[Any] = 4 # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , ) logger.setLevel(logging.INFO ) transformers.utils.logging.set_verbosity_error() UpperCAmelCase : str = torch.device(args.device ) UpperCAmelCase , UpperCAmelCase : Tuple = load_model_tokenizer(args.model_name_or_path , __magic_name__ ) if model.config.decoder_start_token_id is None: raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined" ) model.to(__magic_name__ ) if args.max_length: UpperCAmelCase : List[Any] = args.max_length if args.num_beams: UpperCAmelCase : Dict = args.num_beams if args.output_file_path: UpperCAmelCase : int = args.output_file_path else: UpperCAmelCase : int = "BART.onnx" logger.info("Exporting model to ONNX" ) export_and_validate_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) if __name__ == "__main__": main()
311
'''simple docstring''' from functools import lru_cache def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = 2 UpperCAmelCase : str = set() while i * i <= n: if n % i: i += 1 else: n //= i factors.add(__magic_name__ ) if n > 1: factors.add(__magic_name__ ) return factors @lru_cache def lowercase ( __magic_name__ ): '''simple docstring''' return len(unique_prime_factors(__magic_name__ ) ) def lowercase ( __magic_name__ ): '''simple docstring''' return len(set(__magic_name__ ) ) in (0, 1) def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = 2 while True: # Increment each value of a generated range UpperCAmelCase : Any = [base + i for i in range(__magic_name__ )] # Run elements through out unique_prime_factors function # Append our target number to the end. UpperCAmelCase : Dict = [upf_len(__magic_name__ ) for x in group] checker.append(__magic_name__ ) # If all numbers in the list are equal, return the group variable. if equality(__magic_name__ ): return group # Increment our base variable by 1 base += 1 def lowercase ( __magic_name__ = 4 ): '''simple docstring''' UpperCAmelCase : int = run(__magic_name__ ) return results[0] if len(__magic_name__ ) else None if __name__ == "__main__": print(solution())
311
1
'''simple docstring''' from datetime import datetime import matplotlib.pyplot as plt import torch def lowercase ( __magic_name__ ): '''simple docstring''' for param in module.parameters(): UpperCAmelCase : Any = False def lowercase ( ): '''simple docstring''' UpperCAmelCase : int = "cuda" if torch.cuda.is_available() else "cpu" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): UpperCAmelCase : int = "mps" if device == "mps": print( "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch" " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues" " with generations." ) return device def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : str = plt.imshow(__magic_name__ ) fig.axes.get_xaxis().set_visible(__magic_name__ ) fig.axes.get_yaxis().set_visible(__magic_name__ ) plt.show() def lowercase ( ): '''simple docstring''' UpperCAmelCase : str = datetime.now() UpperCAmelCase : Tuple = current_time.strftime("%H:%M:%S" ) return timestamp
311
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) a : Union[str, Any] = { "configuration_encodec": [ "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP", "EncodecConfig", ], "feature_extraction_encodec": ["EncodecFeatureExtractor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Optional[int] = [ "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST", "EncodecModel", "EncodecPreTrainedModel", ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys a : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
311
1
'''simple docstring''' from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging a : Any = logging.get_logger(__name__) class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = ["pixel_values"] def __init__( self , snake_case = True , snake_case = 1 / 2_5_5 , snake_case = True , snake_case = 8 , **snake_case , ): '''simple docstring''' super().__init__(**snake_case ) UpperCAmelCase : Optional[Any] = do_rescale UpperCAmelCase : List[str] = rescale_factor UpperCAmelCase : Optional[int] = do_pad UpperCAmelCase : str = pad_size def A_ ( self , snake_case , snake_case , snake_case = None , **snake_case ): '''simple docstring''' return rescale(snake_case , scale=snake_case , data_format=snake_case , **snake_case ) def A_ ( self , snake_case , snake_case , snake_case = None ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase : Dict = get_image_size(snake_case ) UpperCAmelCase : Tuple = (old_height // size + 1) * size - old_height UpperCAmelCase : List[Any] = (old_width // size + 1) * size - old_width return pad(snake_case , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=snake_case ) def A_ ( self , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = ChannelDimension.FIRST , **snake_case , ): '''simple docstring''' UpperCAmelCase : Dict = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase : Dict = do_pad if do_pad is not None else self.do_pad UpperCAmelCase : List[str] = pad_size if pad_size is not None else self.pad_size UpperCAmelCase : Optional[Any] = make_list_of_images(snake_case ) if not valid_images(snake_case ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) # All transformations expect numpy arrays. UpperCAmelCase : Optional[int] = [to_numpy_array(snake_case ) for image in images] if do_rescale: UpperCAmelCase : Tuple = [self.rescale(image=snake_case , scale=snake_case ) for image in images] if do_pad: UpperCAmelCase : str = [self.pad(snake_case , size=snake_case ) for image in images] UpperCAmelCase : Optional[int] = [to_channel_dimension_format(snake_case , snake_case ) for image in images] UpperCAmelCase : Union[str, Any] = {"pixel_values": images} return BatchFeature(data=snake_case , tensor_type=snake_case )
311
'''simple docstring''' # Lint as: python3 import itertools import os import re a : Tuple = re.compile(R"([A-Z]+)([A-Z][a-z])") a : Union[str, Any] = re.compile(R"([a-z\d])([A-Z])") a : str = re.compile(R"(?<!_)_(?!_)") a : List[Any] = re.compile(R"(_{2,})") a : List[Any] = R"^\w+(\.\w+)*$" a : Dict = R"<>:/\|?*" def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = _uppercase_uppercase_re.sub(R"\1_\2" , __magic_name__ ) UpperCAmelCase : List[str] = _lowercase_uppercase_re.sub(R"\1_\2" , __magic_name__ ) return name.lower() def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Any = _single_underscore_re.split(__magic_name__ ) UpperCAmelCase : Union[str, Any] = [_multiple_underscores_re.split(__magic_name__ ) for n in name] return "".join(n.capitalize() for n in itertools.chain.from_iterable(__magic_name__ ) if n != "" ) def lowercase ( __magic_name__ ): '''simple docstring''' if os.path.basename(__magic_name__ ) != name: raise ValueError(F"Should be a dataset name, not a path: {name}" ) return camelcase_to_snakecase(__magic_name__ ) def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' if os.path.basename(__magic_name__ ) != name: raise ValueError(F"Should be a dataset name, not a path: {name}" ) if not re.match(_split_re , __magic_name__ ): raise ValueError(F"Split name should match '{_split_re}'' but got '{split}'." ) return F"{filename_prefix_for_name(__magic_name__ )}-{split}" def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None ): '''simple docstring''' UpperCAmelCase : List[str] = filename_prefix_for_split(__magic_name__ , __magic_name__ ) if filetype_suffix: prefix += F".{filetype_suffix}" UpperCAmelCase : int = os.path.join(__magic_name__ , __magic_name__ ) return F"{filepath}*" def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None ): '''simple docstring''' UpperCAmelCase : List[str] = filename_prefix_for_split(__magic_name__ , __magic_name__ ) UpperCAmelCase : int = os.path.join(__magic_name__ , __magic_name__ ) if shard_lengths: UpperCAmelCase : Tuple = len(__magic_name__ ) UpperCAmelCase : Optional[int] = [F"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(__magic_name__ )] if filetype_suffix: UpperCAmelCase : Optional[int] = [filename + F".{filetype_suffix}" for filename in filenames] return filenames else: UpperCAmelCase : int = prefix if filetype_suffix: filename += F".{filetype_suffix}" return [filename]
311
1
'''simple docstring''' import contextlib import os import sqlitea import pytest from datasets import Dataset, Features, Value from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' assert isinstance(__magic_name__ , __magic_name__ ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @require_sqlalchemy @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Tuple = tmp_path / "cache" UpperCAmelCase : int = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): UpperCAmelCase : Optional[int] = SqlDatasetReader( "dataset" , "sqlite:///" + sqlite_path , cache_dir=__magic_name__ , keep_in_memory=__magic_name__ ).read() _check_sql_dataset(__magic_name__ , __magic_name__ ) @require_sqlalchemy @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[str] = tmp_path / "cache" UpperCAmelCase : Tuple = {"col_1": "string", "col_2": "int64", "col_3": "float64"} UpperCAmelCase : Union[str, Any] = features.copy() if features else default_expected_features UpperCAmelCase : int = ( Features({feature: Value(__magic_name__ ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCAmelCase : str = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , features=__magic_name__ , cache_dir=__magic_name__ ).read() _check_sql_dataset(__magic_name__ , __magic_name__ ) def lowercase ( __magic_name__ ): '''simple docstring''' with contextlib.closing(sqlitea.connect(__magic_name__ ) ) as con: UpperCAmelCase : Tuple = con.cursor() cur.execute("SELECT * FROM dataset" ) for row in cur: yield row @require_sqlalchemy def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[str] = tmp_path / "cache" UpperCAmelCase : str = os.path.join(__magic_name__ , "tmp.sql" ) UpperCAmelCase : int = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=__magic_name__ ).read() SqlDatasetWriter(__magic_name__ , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=1 ).write() UpperCAmelCase : Union[str, Any] = iter_sql_file(__magic_name__ ) UpperCAmelCase : List[str] = iter_sql_file(__magic_name__ ) for rowa, rowa in zip(__magic_name__ , __magic_name__ ): assert rowa == rowa @require_sqlalchemy def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Optional[Any] = tmp_path / "cache" UpperCAmelCase : List[Any] = os.path.join(__magic_name__ , "tmp.sql" ) UpperCAmelCase : Dict = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=__magic_name__ ).read() SqlDatasetWriter(__magic_name__ , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=2 ).write() UpperCAmelCase : List[Any] = iter_sql_file(__magic_name__ ) UpperCAmelCase : int = iter_sql_file(__magic_name__ ) for rowa, rowa in zip(__magic_name__ , __magic_name__ ): assert rowa == rowa @require_sqlalchemy def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = tmp_path / "cache" UpperCAmelCase : Optional[int] = os.path.join(__magic_name__ , "tmp.sql" ) UpperCAmelCase : List[Any] = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=__magic_name__ ).read() with pytest.raises(__magic_name__ ): SqlDatasetWriter(__magic_name__ , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=0 ).write()
311
'''simple docstring''' from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) a : Optional[int] = _symbol_database.Default() a : Any = _descriptor_pool.Default().AddSerializedFile( B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03" ) a : Tuple = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals) if _descriptor._USE_C_DESCRIPTORS is False: a : str = None a : Optional[Any] = B"H\003" # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" a : str = 45 a : Any = 15_81 a : List[Any] = 15_17 a : Union[str, Any] = 15_70 a : Optional[Any] = 15_84 a : List[str] = 17_93 a : Optional[Any] = 17_95 a : Tuple = 19_16 a : Optional[Any] = 18_64 a : int = 19_05 a : Optional[Any] = 19_19 a : Union[str, Any] = 24_29 a : List[Any] = 22_08 a : Dict = 24_18 a : Optional[int] = 23_23 a : str = 24_07 # @@protoc_insertion_point(module_scope)
311
1
'''simple docstring''' import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class UpperCamelCase__ : """simple docstring""" @staticmethod def A_ ( *snake_case , **snake_case ): '''simple docstring''' pass @is_pipeline_test @require_vision class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @require_torch def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[Any] = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , ) UpperCAmelCase : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) UpperCAmelCase : Tuple = image_classifier(snake_case , candidate_labels=["a", "b", "c"] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(snake_case ) , [ [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}], ] , ) UpperCAmelCase : List[str] = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 ) self.assertEqual( nested_simplify(snake_case ) , [ [ {"score": 0.333, "label": ANY(snake_case )}, {"score": 0.333, "label": ANY(snake_case )}, {"score": 0.333, "label": ANY(snake_case )}, ], [ {"score": 0.333, "label": ANY(snake_case )}, {"score": 0.333, "label": ANY(snake_case )}, {"score": 0.333, "label": ANY(snake_case )}, ], [ {"score": 0.333, "label": ANY(snake_case )}, {"score": 0.333, "label": ANY(snake_case )}, {"score": 0.333, "label": ANY(snake_case )}, ], [ {"score": 0.333, "label": ANY(snake_case )}, {"score": 0.333, "label": ANY(snake_case )}, {"score": 0.333, "label": ANY(snake_case )}, ], [ {"score": 0.333, "label": ANY(snake_case )}, {"score": 0.333, "label": ANY(snake_case )}, {"score": 0.333, "label": ANY(snake_case )}, ], ] , ) @require_tf def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" ) UpperCAmelCase : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) UpperCAmelCase : int = image_classifier(snake_case , candidate_labels=["a", "b", "c"] ) self.assertEqual( nested_simplify(snake_case ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , ) UpperCAmelCase : Union[str, Any] = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 ) self.assertEqual( nested_simplify(snake_case ) , [ [ {"score": 0.333, "label": ANY(snake_case )}, {"score": 0.333, "label": ANY(snake_case )}, {"score": 0.333, "label": ANY(snake_case )}, ], [ {"score": 0.333, "label": ANY(snake_case )}, {"score": 0.333, "label": ANY(snake_case )}, {"score": 0.333, "label": ANY(snake_case )}, ], [ {"score": 0.333, "label": ANY(snake_case )}, {"score": 0.333, "label": ANY(snake_case )}, {"score": 0.333, "label": ANY(snake_case )}, ], [ {"score": 0.333, "label": ANY(snake_case )}, {"score": 0.333, "label": ANY(snake_case )}, {"score": 0.333, "label": ANY(snake_case )}, ], [ {"score": 0.333, "label": ANY(snake_case )}, {"score": 0.333, "label": ANY(snake_case )}, {"score": 0.333, "label": ANY(snake_case )}, ], ] , ) @slow @require_torch def A_ ( self ): '''simple docstring''' UpperCAmelCase : int = pipeline( task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , ) # This is an image of 2 cats with remotes and no planes UpperCAmelCase : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) UpperCAmelCase : Any = image_classifier(snake_case , candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(snake_case ) , [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ] , ) UpperCAmelCase : List[Any] = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 ) self.assertEqual( nested_simplify(snake_case ) , [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5 , ) @slow @require_tf def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[int] = pipeline( task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" ) # This is an image of 2 cats with remotes and no planes UpperCAmelCase : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) UpperCAmelCase : Tuple = image_classifier(snake_case , candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(snake_case ) , [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ] , ) UpperCAmelCase : Dict = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 ) self.assertEqual( nested_simplify(snake_case ) , [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5 , )
311
'''simple docstring''' import argparse import copy def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[str] = {} with open(__magic_name__ ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: UpperCAmelCase : List[Any] = [] _list.append([line.split()[1], line.split()[2]] ) UpperCAmelCase : Tuple = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: UpperCAmelCase : Any = [] _list.append([line.split()[0], line.split()[2]] ) UpperCAmelCase : int = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' with open(__magic_name__ ) as f: UpperCAmelCase : List[str] = f.read(1 ) UpperCAmelCase : List[Any] = start_node UpperCAmelCase : Union[str, Any] = [] UpperCAmelCase : Any = start_node UpperCAmelCase : Optional[Any] = 0 while visiting not in first_solution: UpperCAmelCase : Optional[Any] = 1_0000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(__magic_name__ ) and k[0] not in first_solution: UpperCAmelCase : Tuple = k[1] UpperCAmelCase : Dict = k[0] first_solution.append(__magic_name__ ) UpperCAmelCase : int = distance_of_first_solution + int(__magic_name__ ) UpperCAmelCase : str = best_node first_solution.append(__magic_name__ ) UpperCAmelCase : int = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 UpperCAmelCase : str = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 1_0000 ) return first_solution, distance_of_first_solution def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Optional[Any] = [] for n in solution[1:-1]: UpperCAmelCase : Any = solution.index(__magic_name__ ) for kn in solution[1:-1]: UpperCAmelCase : Dict = solution.index(__magic_name__ ) if n == kn: continue UpperCAmelCase : Tuple = copy.deepcopy(__magic_name__ ) UpperCAmelCase : Optional[int] = kn UpperCAmelCase : List[str] = n UpperCAmelCase : str = 0 for k in _tmp[:-1]: UpperCAmelCase : List[Any] = _tmp[_tmp.index(__magic_name__ ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: UpperCAmelCase : List[Any] = distance + int(i[1] ) _tmp.append(__magic_name__ ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) UpperCAmelCase : List[str] = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda __magic_name__ : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[Any] = 1 UpperCAmelCase : List[str] = first_solution UpperCAmelCase : str = [] UpperCAmelCase : Union[str, Any] = distance_of_first_solution UpperCAmelCase : Union[str, Any] = solution while count <= iters: UpperCAmelCase : int = find_neighborhood(__magic_name__ , __magic_name__ ) UpperCAmelCase : Any = 0 UpperCAmelCase : List[str] = neighborhood[index_of_best_solution] UpperCAmelCase : Dict = len(__magic_name__ ) - 1 UpperCAmelCase : Dict = False while not found: UpperCAmelCase : List[Any] = 0 while i < len(__magic_name__ ): if best_solution[i] != solution[i]: UpperCAmelCase : int = best_solution[i] UpperCAmelCase : Optional[int] = solution[i] break UpperCAmelCase : List[str] = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) UpperCAmelCase : List[str] = True UpperCAmelCase : List[Any] = best_solution[:-1] UpperCAmelCase : str = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: UpperCAmelCase : Union[str, Any] = cost UpperCAmelCase : Tuple = solution else: UpperCAmelCase : Optional[Any] = index_of_best_solution + 1 UpperCAmelCase : str = neighborhood[index_of_best_solution] if len(__magic_name__ ) >= size: tabu_list.pop(0 ) UpperCAmelCase : int = count + 1 return best_solution_ever, best_cost def lowercase ( __magic_name__=None ): '''simple docstring''' UpperCAmelCase : Dict = generate_neighbours(args.File ) UpperCAmelCase , UpperCAmelCase : Any = generate_first_solution( args.File , __magic_name__ ) UpperCAmelCase , UpperCAmelCase : Any = tabu_search( __magic_name__ , __magic_name__ , __magic_name__ , args.Iterations , args.Size , ) print(F"Best solution: {best_sol}, with total distance: {best_cost}." ) if __name__ == "__main__": a : Union[str, Any] = argparse.ArgumentParser(description="Tabu Search") parser.add_argument( "-f", "--File", type=str, help="Path to the file containing the data", required=True, ) parser.add_argument( "-i", "--Iterations", type=int, help="How many iterations the algorithm should perform", required=True, ) parser.add_argument( "-s", "--Size", type=int, help="Size of the tabu list", required=True ) # Pass the arguments to main method main(parser.parse_args())
311
1
'''simple docstring''' from __future__ import annotations from random import random class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case = None ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = value UpperCAmelCase : Optional[int] = random() UpperCAmelCase : Node | None = None UpperCAmelCase : Node | None = None def __repr__( self ): '''simple docstring''' from pprint import pformat if self.left is None and self.right is None: return f"'{self.value}: {self.prior:.5}'" else: return pformat( {f"{self.value}: {self.prior:.5}": (self.left, self.right)} , indent=1 ) def __str__( self ): '''simple docstring''' UpperCAmelCase : Tuple = str(self.value ) + " " UpperCAmelCase : Dict = str(self.left or "" ) UpperCAmelCase : Optional[Any] = str(self.right or "" ) return value + left + right def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' if root is None: # None tree is split into 2 Nones return None, None elif root.value is None: return None, None else: if value < root.value: UpperCAmelCase , UpperCAmelCase : Optional[int] = split(root.left , __magic_name__ ) return left, root else: UpperCAmelCase , UpperCAmelCase : List[str] = split(root.right , __magic_name__ ) return root, right def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' if (not left) or (not right): # If one node is None, return the other return left or right elif left.prior < right.prior: UpperCAmelCase : Union[str, Any] = merge(left.right , __magic_name__ ) return left else: UpperCAmelCase : Any = merge(__magic_name__ , right.left ) return right def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = Node(__magic_name__ ) UpperCAmelCase , UpperCAmelCase : Optional[int] = split(__magic_name__ , __magic_name__ ) return merge(merge(__magic_name__ , __magic_name__ ) , __magic_name__ ) def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase : Tuple = split(__magic_name__ , value - 1 ) UpperCAmelCase , UpperCAmelCase : int = split(__magic_name__ , __magic_name__ ) return merge(__magic_name__ , __magic_name__ ) def lowercase ( __magic_name__ ): '''simple docstring''' if not root: # None return else: inorder(root.left ) print(root.value , end="," ) inorder(root.right ) def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' for arg in args.split(): if arg[0] == "+": UpperCAmelCase : Dict = insert(__magic_name__ , int(arg[1:] ) ) elif arg[0] == "-": UpperCAmelCase : Tuple = erase(__magic_name__ , int(arg[1:] ) ) else: print("Unknown command" ) return root def lowercase ( ): '''simple docstring''' UpperCAmelCase : List[Any] = None print( "enter numbers to create a tree, + value to add value into treap, " "- value to erase all nodes with value. 'q' to quit. " ) UpperCAmelCase : Optional[Any] = input() while args != "q": UpperCAmelCase : List[Any] = interact_treap(__magic_name__ , __magic_name__ ) print(__magic_name__ ) UpperCAmelCase : Union[str, Any] = input() print("good by!" ) if __name__ == "__main__": import doctest doctest.testmod() main()
311
'''simple docstring''' from collections.abc import Generator from math import sin def lowercase ( __magic_name__ ): '''simple docstring''' if len(__magic_name__ ) != 32: raise ValueError("Input must be of length 32" ) UpperCAmelCase : Union[str, Any] = b"" for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def lowercase ( __magic_name__ ): '''simple docstring''' if i < 0: raise ValueError("Input must be non-negative" ) UpperCAmelCase : Dict = format(__magic_name__ , "08x" )[-8:] UpperCAmelCase : List[str] = b"" for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" ) return little_endian_hex def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : int = b"" for char in message: bit_string += format(__magic_name__ , "08b" ).encode("utf-8" ) UpperCAmelCase : List[Any] = format(len(__magic_name__ ) , "064b" ).encode("utf-8" ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(__magic_name__ ) % 512 != 448: bit_string += b"0" bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] ) return bit_string def lowercase ( __magic_name__ ): '''simple docstring''' if len(__magic_name__ ) % 512 != 0: raise ValueError("Input must have length that's a multiple of 512" ) for pos in range(0 , len(__magic_name__ ) , 512 ): UpperCAmelCase : Union[str, Any] = bit_string[pos : pos + 512] UpperCAmelCase : Tuple = [] for i in range(0 , 512 , 32 ): block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) ) yield block_words def lowercase ( __magic_name__ ): '''simple docstring''' if i < 0: raise ValueError("Input must be non-negative" ) UpperCAmelCase : Any = format(__magic_name__ , "032b" ) UpperCAmelCase : int = "" for c in i_str: new_str += "1" if c == "0" else "0" return int(__magic_name__ , 2 ) def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' return (a + b) % 2**32 def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' if i < 0: raise ValueError("Input must be non-negative" ) if shift < 0: raise ValueError("Shift must be non-negative" ) return ((i << shift) ^ (i >> (32 - shift))) % 2**32 def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = preprocess(__magic_name__ ) UpperCAmelCase : List[Any] = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )] # Starting states UpperCAmelCase : List[str] = 0X67452301 UpperCAmelCase : Tuple = 0XEFCDAB89 UpperCAmelCase : List[Any] = 0X98BADCFE UpperCAmelCase : List[str] = 0X10325476 UpperCAmelCase : Dict = [ 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(__magic_name__ ): UpperCAmelCase : Optional[Any] = aa UpperCAmelCase : List[Any] = ba UpperCAmelCase : Optional[Any] = ca UpperCAmelCase : Any = da # Hash current chunk for i in range(64 ): if i <= 15: # f = (b & c) | (not_32(b) & d) # Alternate definition for f UpperCAmelCase : Tuple = d ^ (b & (c ^ d)) UpperCAmelCase : List[str] = i elif i <= 31: # f = (d & b) | (not_32(d) & c) # Alternate definition for f UpperCAmelCase : int = c ^ (d & (b ^ c)) UpperCAmelCase : Tuple = (5 * i + 1) % 16 elif i <= 47: UpperCAmelCase : Any = b ^ c ^ d UpperCAmelCase : Union[str, Any] = (3 * i + 5) % 16 else: UpperCAmelCase : Dict = c ^ (b | not_aa(__magic_name__ )) UpperCAmelCase : Dict = (7 * i) % 16 UpperCAmelCase : List[str] = (f + a + added_consts[i] + block_words[g]) % 2**32 UpperCAmelCase : List[Any] = d UpperCAmelCase : Any = c UpperCAmelCase : Dict = b UpperCAmelCase : Union[str, Any] = sum_aa(__magic_name__ , left_rotate_aa(__magic_name__ , shift_amounts[i] ) ) # Add hashed chunk to running total UpperCAmelCase : List[str] = sum_aa(__magic_name__ , __magic_name__ ) UpperCAmelCase : Any = sum_aa(__magic_name__ , __magic_name__ ) UpperCAmelCase : List[Any] = sum_aa(__magic_name__ , __magic_name__ ) UpperCAmelCase : Optional[int] = sum_aa(__magic_name__ , __magic_name__ ) UpperCAmelCase : List[str] = reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ ) return digest if __name__ == "__main__": import doctest doctest.testmod()
311
1
'''simple docstring''' import time import warnings from abc import ABC from copy import deepcopy from typing import Optional import torch from ..utils import add_start_docstrings, logging a : List[Any] = logging.get_logger(__name__) a : List[Any] = R"\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n" class UpperCamelCase__ ( lowercase__ ): """simple docstring""" @add_start_docstrings(snake_case ) def __call__( self , snake_case , snake_case , **snake_case ): '''simple docstring''' raise NotImplementedError("StoppingCriteria needs to be subclassed" ) class UpperCamelCase__ ( lowercase__ ): """simple docstring""" def __init__( self , snake_case , snake_case = None ): '''simple docstring''' UpperCAmelCase : Tuple = max_length UpperCAmelCase : Optional[int] = max_position_embeddings @add_start_docstrings(snake_case ) def __call__( self , snake_case , snake_case , **snake_case ): '''simple docstring''' UpperCAmelCase : int = input_ids.shape[-1] UpperCAmelCase : int = cur_len >= self.max_length if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings: logger.warning_once( "This is a friendly reminder - the current text generation call will exceed the model's predefined " f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe " "exceptions, performance degradation, or nothing at all." ) return is_done class UpperCamelCase__ ( lowercase__ ): """simple docstring""" def __init__( self , snake_case , snake_case ): '''simple docstring''' warnings.warn( "The class `MaxNewTokensCriteria` is deprecated. " f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` " "with `max_length = start_length + max_new_tokens` instead." , snake_case , ) UpperCAmelCase : Union[str, Any] = start_length UpperCAmelCase : List[Any] = max_new_tokens UpperCAmelCase : Optional[int] = start_length + max_new_tokens @add_start_docstrings(snake_case ) def __call__( self , snake_case , snake_case , **snake_case ): '''simple docstring''' return input_ids.shape[-1] >= self.max_length class UpperCamelCase__ ( lowercase__ ): """simple docstring""" def __init__( self , snake_case , snake_case = None ): '''simple docstring''' UpperCAmelCase : Optional[Any] = max_time UpperCAmelCase : Union[str, Any] = time.time() if initial_timestamp is None else initial_timestamp @add_start_docstrings(snake_case ) def __call__( self , snake_case , snake_case , **snake_case ): '''simple docstring''' return time.time() - self.initial_timestamp > self.max_time class UpperCamelCase__ ( lowercase__ ): """simple docstring""" @add_start_docstrings(snake_case ) def __call__( self , snake_case , snake_case , **snake_case ): '''simple docstring''' return any(criteria(snake_case , snake_case ) for criteria in self ) @property def A_ ( self ): '''simple docstring''' for stopping_criterium in self: if isinstance(snake_case , snake_case ): return stopping_criterium.max_length elif isinstance(snake_case , snake_case ): return stopping_criterium.max_length return None def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Any = stopping_criteria.max_length UpperCAmelCase : str = deepcopy(__magic_name__ ) if stopping_max_length is not None and stopping_max_length != max_length: warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , __magic_name__ ) elif stopping_max_length is None: new_stopping_criteria.append(MaxLengthCriteria(max_length=__magic_name__ ) ) return new_stopping_criteria
311
'''simple docstring''' a : List[str] = "0.21.0" from .accelerator import Accelerator from .big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from .data_loader import skip_first_batches from .launchers import debug_launcher, notebook_launcher from .state import PartialState from .utils import ( DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states, ) if is_rich_available(): from .utils import rich
311
1
'''simple docstring''' import os import unittest from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, BertTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class UpperCamelCase__ ( lowercase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = BertTokenizer SCREAMING_SNAKE_CASE__ : List[str] = BertTokenizerFast SCREAMING_SNAKE_CASE__ : Optional[int] = True SCREAMING_SNAKE_CASE__ : Union[str, Any] = True SCREAMING_SNAKE_CASE__ : Optional[Any] = filter_non_english def A_ ( self ): '''simple docstring''' super().setUp() UpperCAmelCase : Optional[int] = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] UpperCAmelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : Tuple = "UNwant\u00E9d,running" UpperCAmelCase : Union[str, Any] = "unwanted, running" return input_text, output_text def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = self.tokenizer_class(self.vocab_file ) UpperCAmelCase : str = tokenizer.tokenize("UNwant\u00E9d,running" ) self.assertListEqual(snake_case , ["un", "##want", "##ed", ",", "runn", "##ing"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case ) , [9, 6, 7, 1_2, 1_0, 1_1] ) def A_ ( self ): '''simple docstring''' if not self.test_rust_tokenizer: return UpperCAmelCase : Tuple = self.get_tokenizer() UpperCAmelCase : List[str] = self.get_rust_tokenizer() UpperCAmelCase : Optional[Any] = "UNwant\u00E9d,running" UpperCAmelCase : Union[str, Any] = tokenizer.tokenize(snake_case ) UpperCAmelCase : Tuple = rust_tokenizer.tokenize(snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : Union[str, Any] = tokenizer.encode(snake_case , add_special_tokens=snake_case ) UpperCAmelCase : str = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : Optional[int] = self.get_rust_tokenizer() UpperCAmelCase : List[str] = tokenizer.encode(snake_case ) UpperCAmelCase : str = rust_tokenizer.encode(snake_case ) self.assertListEqual(snake_case , snake_case ) # With lower casing UpperCAmelCase : Optional[int] = self.get_tokenizer(do_lower_case=snake_case ) UpperCAmelCase : Any = self.get_rust_tokenizer(do_lower_case=snake_case ) UpperCAmelCase : List[str] = "UNwant\u00E9d,running" UpperCAmelCase : Dict = tokenizer.tokenize(snake_case ) UpperCAmelCase : Any = rust_tokenizer.tokenize(snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : Tuple = tokenizer.encode(snake_case , add_special_tokens=snake_case ) UpperCAmelCase : List[str] = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : List[str] = self.get_rust_tokenizer() UpperCAmelCase : str = tokenizer.encode(snake_case ) UpperCAmelCase : List[str] = rust_tokenizer.encode(snake_case ) self.assertListEqual(snake_case , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[int] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[int] = BasicTokenizer(do_lower_case=snake_case ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = BasicTokenizer(do_lower_case=snake_case , strip_accents=snake_case ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[int] = BasicTokenizer(do_lower_case=snake_case , strip_accents=snake_case ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = BasicTokenizer(do_lower_case=snake_case ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = BasicTokenizer(do_lower_case=snake_case ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = BasicTokenizer(do_lower_case=snake_case , strip_accents=snake_case ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = BasicTokenizer(do_lower_case=snake_case , strip_accents=snake_case ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = BasicTokenizer(do_lower_case=snake_case , never_split=["[UNK]"] ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : int = BasicTokenizer() UpperCAmelCase : Union[str, Any] = "a\n'll !!to?'d of, can't." UpperCAmelCase : List[Any] = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."] self.assertListEqual(tokenizer.tokenize(snake_case ) , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] UpperCAmelCase : List[str] = {} for i, token in enumerate(snake_case ): UpperCAmelCase : str = i UpperCAmelCase : Union[str, Any] = WordpieceTokenizer(vocab=snake_case , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] ) self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] ) def A_ ( self ): '''simple docstring''' self.assertTrue(_is_whitespace(" " ) ) self.assertTrue(_is_whitespace("\t" ) ) self.assertTrue(_is_whitespace("\r" ) ) self.assertTrue(_is_whitespace("\n" ) ) self.assertTrue(_is_whitespace("\u00A0" ) ) self.assertFalse(_is_whitespace("A" ) ) self.assertFalse(_is_whitespace("-" ) ) def A_ ( self ): '''simple docstring''' self.assertTrue(_is_control("\u0005" ) ) self.assertFalse(_is_control("A" ) ) self.assertFalse(_is_control(" " ) ) self.assertFalse(_is_control("\t" ) ) self.assertFalse(_is_control("\r" ) ) def A_ ( self ): '''simple docstring''' self.assertTrue(_is_punctuation("-" ) ) self.assertTrue(_is_punctuation("$" ) ) self.assertTrue(_is_punctuation("`" ) ) self.assertTrue(_is_punctuation("." ) ) self.assertFalse(_is_punctuation("A" ) ) self.assertFalse(_is_punctuation(" " ) ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[int] = self.get_tokenizer() UpperCAmelCase : List[str] = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(snake_case ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) self.assertListEqual( [rust_tokenizer.tokenize(snake_case ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = self.tokenizer_class.from_pretrained("bert-base-uncased" ) UpperCAmelCase : Tuple = tokenizer.encode("sequence builders" , add_special_tokens=snake_case ) UpperCAmelCase : Optional[int] = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case ) UpperCAmelCase : Dict = tokenizer.build_inputs_with_special_tokens(snake_case ) UpperCAmelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(snake_case , snake_case ) assert encoded_sentence == [1_0_1] + text + [1_0_2] assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2] def A_ ( self ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(snake_case , **snake_case ) UpperCAmelCase : List[Any] = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence." UpperCAmelCase : List[str] = tokenizer_r.encode_plus( snake_case , return_attention_mask=snake_case , return_token_type_ids=snake_case , return_offsets_mapping=snake_case , add_special_tokens=snake_case , ) UpperCAmelCase : Tuple = tokenizer_r.do_lower_case if hasattr(snake_case , "do_lower_case" ) else False UpperCAmelCase : str = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), "##ï"), ((6, 8), "##ve"), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), "Allen"), ((2_1, 2_3), "##NL"), ((2_3, 2_4), "##P"), ((2_5, 3_3), "sentence"), ((3_3, 3_4), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), "allen"), ((2_1, 2_3), "##nl"), ((2_3, 2_4), "##p"), ((2_5, 3_3), "sentence"), ((3_3, 3_4), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = ["的", "人", "有"] UpperCAmelCase : Union[str, Any] = "".join(snake_case ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): UpperCAmelCase : Optional[int] = True UpperCAmelCase : List[Any] = self.tokenizer_class.from_pretrained(snake_case , **snake_case ) UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(snake_case , **snake_case ) UpperCAmelCase : Union[str, Any] = tokenizer_p.encode(snake_case , add_special_tokens=snake_case ) UpperCAmelCase : str = tokenizer_r.encode(snake_case , add_special_tokens=snake_case ) UpperCAmelCase : Tuple = tokenizer_r.convert_ids_to_tokens(snake_case ) UpperCAmelCase : Tuple = tokenizer_p.convert_ids_to_tokens(snake_case ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(snake_case , snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : List[Any] = False UpperCAmelCase : int = self.rust_tokenizer_class.from_pretrained(snake_case , **snake_case ) UpperCAmelCase : str = self.tokenizer_class.from_pretrained(snake_case , **snake_case ) UpperCAmelCase : int = tokenizer_r.encode(snake_case , add_special_tokens=snake_case ) UpperCAmelCase : Optional[Any] = tokenizer_p.encode(snake_case , add_special_tokens=snake_case ) UpperCAmelCase : str = tokenizer_r.convert_ids_to_tokens(snake_case ) UpperCAmelCase : int = tokenizer_p.convert_ids_to_tokens(snake_case ) # it is expected that only the first Chinese character is not preceded by "##". UpperCAmelCase : List[Any] = [ f"##{token}" if idx != 0 else token for idx, token in enumerate(snake_case ) ] self.assertListEqual(snake_case , snake_case ) self.assertListEqual(snake_case , snake_case )
311
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() a : Dict = logging.get_logger(__name__) def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[str] = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: UpperCAmelCase : Tuple = 192 UpperCAmelCase : str = 768 UpperCAmelCase : List[Any] = 12 UpperCAmelCase : List[Any] = 3 UpperCAmelCase : List[Any] = [800, 1333] UpperCAmelCase : List[str] = False elif yolos_name == "yolos_s_dWr": UpperCAmelCase : Union[str, Any] = 330 UpperCAmelCase : Union[str, Any] = 14 UpperCAmelCase : Any = 6 UpperCAmelCase : int = 1320 elif "yolos_s" in yolos_name: UpperCAmelCase : Union[str, Any] = 384 UpperCAmelCase : Dict = 1536 UpperCAmelCase : str = 12 UpperCAmelCase : List[str] = 6 elif "yolos_b" in yolos_name: UpperCAmelCase : int = [800, 1344] UpperCAmelCase : Optional[int] = 91 UpperCAmelCase : int = "huggingface/label-files" UpperCAmelCase : Union[str, Any] = "coco-detection-id2label.json" UpperCAmelCase : Optional[Any] = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="dataset" ) , "r" ) ) UpperCAmelCase : str = {int(__magic_name__ ): v for k, v in idalabel.items()} UpperCAmelCase : str = idalabel UpperCAmelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} return config def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ = False ): '''simple docstring''' for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCAmelCase : Tuple = state_dict.pop(F"blocks.{i}.attn.qkv.weight" ) UpperCAmelCase : List[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase : str = in_proj_weight[: config.hidden_size, :] UpperCAmelCase : Optional[int] = in_proj_bias[: config.hidden_size] UpperCAmelCase : Optional[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCAmelCase : int = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCAmelCase : str = in_proj_weight[-config.hidden_size :, :] UpperCAmelCase : Tuple = in_proj_bias[-config.hidden_size :] def lowercase ( __magic_name__ ): '''simple docstring''' if "backbone" in name: UpperCAmelCase : int = name.replace("backbone" , "vit" ) if "cls_token" in name: UpperCAmelCase : Dict = name.replace("cls_token" , "embeddings.cls_token" ) if "det_token" in name: UpperCAmelCase : int = name.replace("det_token" , "embeddings.detection_tokens" ) if "mid_pos_embed" in name: UpperCAmelCase : Tuple = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" ) if "pos_embed" in name: UpperCAmelCase : int = name.replace("pos_embed" , "embeddings.position_embeddings" ) if "patch_embed.proj" in name: UpperCAmelCase : str = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "blocks" in name: UpperCAmelCase : Tuple = name.replace("blocks" , "encoder.layer" ) if "attn.proj" in name: UpperCAmelCase : Tuple = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: UpperCAmelCase : Any = name.replace("attn" , "attention.self" ) if "norm1" in name: UpperCAmelCase : int = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: UpperCAmelCase : List[str] = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: UpperCAmelCase : List[str] = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: UpperCAmelCase : Dict = name.replace("mlp.fc2" , "output.dense" ) if "class_embed" in name: UpperCAmelCase : Any = name.replace("class_embed" , "class_labels_classifier" ) if "bbox_embed" in name: UpperCAmelCase : Optional[int] = name.replace("bbox_embed" , "bbox_predictor" ) if "vit.norm" in name: UpperCAmelCase : Tuple = name.replace("vit.norm" , "vit.layernorm" ) return name def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' for key in orig_state_dict.copy().keys(): UpperCAmelCase : Optional[int] = orig_state_dict.pop(__magic_name__ ) if "qkv" in key: UpperCAmelCase : str = key.split("." ) UpperCAmelCase : List[Any] = int(key_split[2] ) UpperCAmelCase : int = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: UpperCAmelCase : Optional[int] = val[:dim, :] UpperCAmelCase : Union[str, Any] = val[ dim : dim * 2, : ] UpperCAmelCase : Any = val[-dim:, :] else: UpperCAmelCase : Tuple = val[:dim] UpperCAmelCase : List[str] = val[dim : dim * 2] UpperCAmelCase : Any = val[-dim:] else: UpperCAmelCase : Union[str, Any] = val return orig_state_dict def lowercase ( ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase : Tuple = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw ) return im @torch.no_grad() def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = False ): '''simple docstring''' UpperCAmelCase : Tuple = get_yolos_config(__magic_name__ ) # load original state_dict UpperCAmelCase : int = torch.load(__magic_name__ , map_location="cpu" )["model"] # load 🤗 model UpperCAmelCase : int = YolosForObjectDetection(__magic_name__ ) model.eval() UpperCAmelCase : Dict = convert_state_dict(__magic_name__ , __magic_name__ ) model.load_state_dict(__magic_name__ ) # Check outputs on an image, prepared by YolosImageProcessor UpperCAmelCase : Dict = 800 if yolos_name != "yolos_ti" else 512 UpperCAmelCase : int = YolosImageProcessor(format="coco_detection" , size=__magic_name__ ) UpperCAmelCase : List[Any] = image_processor(images=prepare_img() , return_tensors="pt" ) UpperCAmelCase : List[str] = model(**__magic_name__ ) UpperCAmelCase , UpperCAmelCase : Optional[int] = outputs.logits, outputs.pred_boxes UpperCAmelCase , UpperCAmelCase : Optional[Any] = None, None if yolos_name == "yolos_ti": UpperCAmelCase : str = torch.tensor( [[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] ) UpperCAmelCase : Tuple = torch.tensor( [[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] ) elif yolos_name == "yolos_s_200_pre": UpperCAmelCase : Union[str, Any] = torch.tensor( [[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] ) UpperCAmelCase : List[str] = torch.tensor( [[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] ) elif yolos_name == "yolos_s_300_pre": UpperCAmelCase : List[str] = torch.tensor( [[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] ) UpperCAmelCase : Dict = torch.tensor( [[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] ) elif yolos_name == "yolos_s_dWr": UpperCAmelCase : Dict = torch.tensor( [[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] ) UpperCAmelCase : List[Any] = torch.tensor( [[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] ) elif yolos_name == "yolos_base": UpperCAmelCase : str = torch.tensor( [[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] ) UpperCAmelCase : Union[str, Any] = torch.tensor( [[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] ) else: raise ValueError(F"Unknown yolos_name: {yolos_name}" ) assert torch.allclose(logits[0, :3, :3] , __magic_name__ , atol=1e-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , __magic_name__ , atol=1e-4 ) Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) print(F"Saving model {yolos_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(__magic_name__ ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(__magic_name__ ) if push_to_hub: UpperCAmelCase : int = { "yolos_ti": "yolos-tiny", "yolos_s_200_pre": "yolos-small", "yolos_s_300_pre": "yolos-small-300", "yolos_s_dWr": "yolos-small-dwr", "yolos_base": "yolos-base", } print("Pushing to the hub..." ) UpperCAmelCase : Tuple = model_mapping[yolos_name] image_processor.push_to_hub(__magic_name__ , organization="hustvl" ) model.push_to_hub(__magic_name__ , organization="hustvl" ) if __name__ == "__main__": a : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--yolos_name", default="yolos_s_200_pre", type=str, help=( "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre'," " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'." ), ) parser.add_argument( "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) a : str = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
311
1
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer a : List[Any] = logging.get_logger(__name__) a : Dict = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} a : List[Any] = { "vocab_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt" ), "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt", "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt" ), }, "tokenizer_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json" ), "squeezebert/squeezebert-mnli": ( "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json" ), "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json" ), }, } a : List[str] = { "squeezebert/squeezebert-uncased": 5_12, "squeezebert/squeezebert-mnli": 5_12, "squeezebert/squeezebert-mnli-headless": 5_12, } a : Dict = { "squeezebert/squeezebert-uncased": {"do_lower_case": True}, "squeezebert/squeezebert-mnli": {"do_lower_case": True}, "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True}, } class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ : int = PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ : Dict = SqueezeBertTokenizer def __init__( self , snake_case=None , snake_case=None , snake_case=True , snake_case="[UNK]" , snake_case="[SEP]" , snake_case="[PAD]" , snake_case="[CLS]" , snake_case="[MASK]" , snake_case=True , snake_case=None , **snake_case , ): '''simple docstring''' super().__init__( snake_case , tokenizer_file=snake_case , do_lower_case=snake_case , unk_token=snake_case , sep_token=snake_case , pad_token=snake_case , cls_token=snake_case , mask_token=snake_case , tokenize_chinese_chars=snake_case , strip_accents=snake_case , **snake_case , ) UpperCAmelCase : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , snake_case ) != do_lower_case or normalizer_state.get("strip_accents" , snake_case ) != strip_accents or normalizer_state.get("handle_chinese_chars" , snake_case ) != tokenize_chinese_chars ): UpperCAmelCase : Dict = getattr(snake_case , normalizer_state.pop("type" ) ) UpperCAmelCase : Optional[int] = do_lower_case UpperCAmelCase : Optional[int] = strip_accents UpperCAmelCase : Any = tokenize_chinese_chars UpperCAmelCase : Any = normalizer_class(**snake_case ) UpperCAmelCase : Optional[int] = do_lower_case def A_ ( self , snake_case , snake_case=None ): '''simple docstring''' UpperCAmelCase : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def A_ ( self , snake_case , snake_case = None ): '''simple docstring''' UpperCAmelCase : Optional[int] = [self.sep_token_id] UpperCAmelCase : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def A_ ( self , snake_case , snake_case = None ): '''simple docstring''' UpperCAmelCase : Any = self._tokenizer.model.save(snake_case , name=snake_case ) return tuple(snake_case )
311
'''simple docstring''' import argparse import logging import pickle import random import time import numpy as np from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO ) a : Tuple = logging.getLogger(__name__) def lowercase ( ): '''simple docstring''' UpperCAmelCase : Any = argparse.ArgumentParser( description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." ) parser.add_argument("--file_path" , type=__magic_name__ , default="data/dump.txt" , help="The path to the data." ) parser.add_argument("--tokenizer_type" , type=__magic_name__ , default="bert" , choices=["bert", "roberta", "gpt2"] ) parser.add_argument("--tokenizer_name" , type=__magic_name__ , default="bert-base-uncased" , help="The tokenizer to use." ) parser.add_argument("--dump_file" , type=__magic_name__ , default="data/dump" , help="The dump file prefix." ) UpperCAmelCase : List[Any] = parser.parse_args() logger.info(F"Loading Tokenizer ({args.tokenizer_name})" ) if args.tokenizer_type == "bert": UpperCAmelCase : Any = BertTokenizer.from_pretrained(args.tokenizer_name ) UpperCAmelCase : Optional[int] = tokenizer.special_tokens_map["cls_token"] # `[CLS]` UpperCAmelCase : Any = tokenizer.special_tokens_map["sep_token"] # `[SEP]` elif args.tokenizer_type == "roberta": UpperCAmelCase : List[Any] = RobertaTokenizer.from_pretrained(args.tokenizer_name ) UpperCAmelCase : Tuple = tokenizer.special_tokens_map["cls_token"] # `<s>` UpperCAmelCase : Optional[int] = tokenizer.special_tokens_map["sep_token"] # `</s>` elif args.tokenizer_type == "gpt2": UpperCAmelCase : List[str] = GPTaTokenizer.from_pretrained(args.tokenizer_name ) UpperCAmelCase : Optional[Any] = tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>` UpperCAmelCase : List[Any] = tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>` logger.info(F"Loading text from {args.file_path}" ) with open(args.file_path , "r" , encoding="utf8" ) as fp: UpperCAmelCase : str = fp.readlines() logger.info("Start encoding" ) logger.info(F"{len(__magic_name__ )} examples to process." ) UpperCAmelCase : int = [] UpperCAmelCase : int = 0 UpperCAmelCase : Union[str, Any] = 1_0000 UpperCAmelCase : Union[str, Any] = time.time() for text in data: UpperCAmelCase : Dict = F"{bos} {text.strip()} {sep}" UpperCAmelCase : Tuple = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) rslt.append(__magic_name__ ) iter += 1 if iter % interval == 0: UpperCAmelCase : Dict = time.time() logger.info(F"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" ) UpperCAmelCase : Any = time.time() logger.info("Finished binarization" ) logger.info(F"{len(__magic_name__ )} examples processed." ) UpperCAmelCase : str = F"{args.dump_file}.{args.tokenizer_name}.pickle" UpperCAmelCase : List[str] = tokenizer.vocab_size if vocab_size < (1 << 16): UpperCAmelCase : int = [np.uintaa(__magic_name__ ) for d in rslt] else: UpperCAmelCase : int = [np.intaa(__magic_name__ ) for d in rslt] random.shuffle(rslt_ ) logger.info(F"Dump to {dp_file}" ) with open(__magic_name__ , "wb" ) as handle: pickle.dump(rslt_ , __magic_name__ , protocol=pickle.HIGHEST_PROTOCOL ) if __name__ == "__main__": main()
311
1
'''simple docstring''' from argparse import ArgumentParser, Namespace from typing import Any, List, Optional from ..pipelines import Pipeline, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand try: from fastapi import Body, FastAPI, HTTPException from fastapi.routing import APIRoute from pydantic import BaseModel from starlette.responses import JSONResponse from uvicorn import run a : Optional[int] = True except (ImportError, AttributeError): a : str = object def lowercase ( *__magic_name__ , **__magic_name__ ): '''simple docstring''' pass a : List[Any] = False a : List[Any] = logging.get_logger("transformers-cli/serving") def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = pipeline( task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , ) return ServeCommand(__magic_name__ , args.host , args.port , args.workers ) class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : dict class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] SCREAMING_SNAKE_CASE__ : Optional[List[int]] class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : str class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Any class UpperCamelCase__ ( lowercase__ ): """simple docstring""" @staticmethod def A_ ( snake_case ): '''simple docstring''' UpperCAmelCase : Dict = parser.add_parser( "serve" , help="CLI tool to run inference requests through REST and GraphQL endpoints." ) serve_parser.add_argument( "--task" , type=snake_case , choices=get_supported_tasks() , help="The task to run the pipeline on" , ) serve_parser.add_argument("--host" , type=snake_case , default="localhost" , help="Interface the server will listen on." ) serve_parser.add_argument("--port" , type=snake_case , default=8_8_8_8 , help="Port the serving will listen to." ) serve_parser.add_argument("--workers" , type=snake_case , default=1 , help="Number of http workers" ) serve_parser.add_argument("--model" , type=snake_case , help="Model's name or path to stored model." ) serve_parser.add_argument("--config" , type=snake_case , help="Model's config name or path to stored model." ) serve_parser.add_argument("--tokenizer" , type=snake_case , help="Tokenizer name to use." ) serve_parser.add_argument( "--device" , type=snake_case , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , ) serve_parser.set_defaults(func=snake_case ) def __init__( self , snake_case , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Any = pipeline UpperCAmelCase : List[Any] = host UpperCAmelCase : Optional[Any] = port UpperCAmelCase : int = workers if not _serve_dependencies_installed: raise RuntimeError( "Using serve command requires FastAPI and uvicorn. " "Please install transformers with [serving]: pip install \"transformers[serving]\"." "Or install FastAPI and uvicorn separately." ) else: logger.info(f"Serving model over {host}:{port}" ) UpperCAmelCase : Optional[int] = FastAPI( routes=[ APIRoute( "/" , self.model_info , response_model=snake_case , response_class=snake_case , methods=["GET"] , ), APIRoute( "/tokenize" , self.tokenize , response_model=snake_case , response_class=snake_case , methods=["POST"] , ), APIRoute( "/detokenize" , self.detokenize , response_model=snake_case , response_class=snake_case , methods=["POST"] , ), APIRoute( "/forward" , self.forward , response_model=snake_case , response_class=snake_case , methods=["POST"] , ), ] , timeout=6_0_0 , ) def A_ ( self ): '''simple docstring''' run(self._app , host=self.host , port=self.port , workers=self.workers ) def A_ ( self ): '''simple docstring''' return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) ) def A_ ( self , snake_case = Body(snake_case , embed=snake_case ) , snake_case = Body(snake_case , embed=snake_case ) ): '''simple docstring''' try: UpperCAmelCase : Tuple = self._pipeline.tokenizer.tokenize(snake_case ) if return_ids: UpperCAmelCase : Dict = self._pipeline.tokenizer.convert_tokens_to_ids(snake_case ) return ServeTokenizeResult(tokens=snake_case , tokens_ids=snake_case ) else: return ServeTokenizeResult(tokens=snake_case ) except Exception as e: raise HTTPException(status_code=5_0_0 , detail={"model": "", "error": str(snake_case )} ) def A_ ( self , snake_case = Body(snake_case , embed=snake_case ) , snake_case = Body(snake_case , embed=snake_case ) , snake_case = Body(snake_case , embed=snake_case ) , ): '''simple docstring''' try: UpperCAmelCase : Dict = self._pipeline.tokenizer.decode(snake_case , snake_case , snake_case ) return ServeDeTokenizeResult(model="" , text=snake_case ) except Exception as e: raise HTTPException(status_code=5_0_0 , detail={"model": "", "error": str(snake_case )} ) async def A_ ( self , snake_case=Body(snake_case , embed=snake_case ) ): '''simple docstring''' if len(snake_case ) == 0: return ServeForwardResult(output=[] , attention=[] ) try: # Forward through the model UpperCAmelCase : Dict = self._pipeline(snake_case ) return ServeForwardResult(output=snake_case ) except Exception as e: raise HTTPException(5_0_0 , {"error": str(snake_case )} )
311
'''simple docstring''' import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available from transformers.models.gpta.tokenization_gpta import GPTaTokenizer from transformers.testing_utils import require_keras_nlp, require_tf, slow if is_tf_available(): import tensorflow as tf if is_keras_nlp_available(): from transformers.models.gpta import TFGPTaTokenizer a : Tuple = ["gpt2"] a : Dict = "gpt2" if is_tf_available(): class UpperCamelCase__ ( tf.Module ): """simple docstring""" def __init__( self , snake_case ): '''simple docstring''' super().__init__() UpperCAmelCase : Tuple = tokenizer UpperCAmelCase : List[str] = AutoConfig.from_pretrained(snake_case ) UpperCAmelCase : int = TFGPTaLMHeadModel.from_config(snake_case ) @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="text" ),) ) def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = self.tokenizer(snake_case ) UpperCAmelCase : Optional[int] = tokenized["input_ids"].to_tensor() UpperCAmelCase : Optional[int] = tf.cast(input_ids_dense > 0 , tf.intaa ) # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN]) UpperCAmelCase : List[Any] = self.model(input_ids=snake_case , attention_mask=snake_case )["logits"] return outputs @require_tf @require_keras_nlp class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self ): '''simple docstring''' super().setUp() UpperCAmelCase : Any = [GPTaTokenizer.from_pretrained(snake_case ) for checkpoint in (TOKENIZER_CHECKPOINTS)] UpperCAmelCase : Optional[Any] = [TFGPTaTokenizer.from_pretrained(snake_case ) for checkpoint in TOKENIZER_CHECKPOINTS] assert len(self.tokenizers ) == len(self.tf_tokenizers ) UpperCAmelCase : Tuple = [ "This is a straightforward English test sentence.", "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.", "Now we're going to add some Chinese: 一 二 三 一二三", "And some much more rare Chinese: 齉 堃 齉堃", "Je vais aussi écrire en français pour tester les accents", "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ", ] UpperCAmelCase : Optional[Any] = list(zip(self.test_sentences , self.test_sentences[::-1] ) ) def A_ ( self ): '''simple docstring''' for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ): for test_inputs in self.test_sentences: UpperCAmelCase : List[Any] = tokenizer([test_inputs] , return_tensors="tf" ) UpperCAmelCase : Any = tf_tokenizer([test_inputs] ) for key in python_outputs.keys(): # convert them to numpy to avoid messing with ragged tensors UpperCAmelCase : Dict = python_outputs[key].numpy() UpperCAmelCase : List[str] = tf_outputs[key].numpy() self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) ) self.assertTrue(tf.reduce_all(tf.cast(snake_case , tf.intaa ) == tf_outputs_values ) ) @slow def A_ ( self ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: UpperCAmelCase : Optional[Any] = tf.function(snake_case ) for test_inputs in self.test_sentences: UpperCAmelCase : List[str] = tf.constant(snake_case ) UpperCAmelCase : Dict = compiled_tokenizer(snake_case ) UpperCAmelCase : Union[str, Any] = tf_tokenizer(snake_case ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def A_ ( self ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: UpperCAmelCase : int = ModelToSave(tokenizer=snake_case ) UpperCAmelCase : Tuple = tf.convert_to_tensor([self.test_sentences[0]] ) UpperCAmelCase : str = model.serving(snake_case ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: UpperCAmelCase : Optional[int] = Path(snake_case ) / "saved.model" tf.saved_model.save(snake_case , snake_case , signatures={"serving_default": model.serving} ) UpperCAmelCase : int = tf.saved_model.load(snake_case ) UpperCAmelCase : str = loaded_model.signatures["serving_default"](snake_case )["output_0"] # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertTrue(tf.reduce_all(out == loaded_output ) ) @slow def A_ ( self ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: UpperCAmelCase : Any = tf.convert_to_tensor([self.test_sentences[0]] ) UpperCAmelCase : Tuple = tf_tokenizer(snake_case ) # Build model with some sample inputs UpperCAmelCase : Union[str, Any] = tf_tokenizer.get_config() UpperCAmelCase : str = TFGPTaTokenizer.from_config(snake_case ) UpperCAmelCase : Tuple = model_from_config(snake_case ) for key in from_config_output.keys(): self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) ) @slow def A_ ( self ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: # for the test to run UpperCAmelCase : List[str] = 1_2_3_1_2_3 for max_length in [3, 5, 1_0_2_4]: UpperCAmelCase : Any = tf.convert_to_tensor([self.test_sentences[0]] ) UpperCAmelCase : Tuple = tf_tokenizer(snake_case , max_length=snake_case ) UpperCAmelCase : Union[str, Any] = out["input_ids"].numpy().shape[1] assert out_length == max_length
311
1
'''simple docstring''' import numpy as np import pandas as pd from sklearn.preprocessing import Normalizer from sklearn.svm import SVR from statsmodels.tsa.statespace.sarimax import SARIMAX def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Any = np.array([[1, item, train_mtch[i]] for i, item in enumerate(__magic_name__ )] ) UpperCAmelCase : int = np.array(__magic_name__ ) UpperCAmelCase : Optional[Any] = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , __magic_name__ ) ) , x.transpose() ) , __magic_name__ ) return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] ) def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Optional[int] = (1, 2, 1) UpperCAmelCase : Any = (1, 1, 0, 7) UpperCAmelCase : Union[str, Any] = SARIMAX( __magic_name__ , exog=__magic_name__ , order=__magic_name__ , seasonal_order=__magic_name__ ) UpperCAmelCase : Union[str, Any] = model.fit(disp=__magic_name__ , maxiter=600 , method="nm" ) UpperCAmelCase : Optional[Any] = model_fit.predict(1 , len(__magic_name__ ) , exog=[test_match] ) return result[0] def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : str = SVR(kernel="rbf" , C=1 , gamma=0.1 , epsilon=0.1 ) regressor.fit(__magic_name__ , __magic_name__ ) UpperCAmelCase : Any = regressor.predict(__magic_name__ ) return y_pred[0] def lowercase ( __magic_name__ ): '''simple docstring''' train_user.sort() UpperCAmelCase : List[Any] = np.percentile(__magic_name__ , 25 ) UpperCAmelCase : int = np.percentile(__magic_name__ , 75 ) UpperCAmelCase : Any = qa - qa UpperCAmelCase : Optional[int] = qa - (iqr * 0.1) return low_lim def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Any = 0 UpperCAmelCase : Any = 0 for i in list_vote: if i > actual_result: UpperCAmelCase : Any = not_safe + 1 else: if abs(abs(__magic_name__ ) - abs(__magic_name__ ) ) <= 0.1: safe += 1 else: not_safe += 1 return safe > not_safe if __name__ == "__main__": # data_input_df = pd.read_csv("ex_data.csv", header=None) a : Union[str, Any] = [[1_82_31, 0.0, 1], [2_26_21, 1.0, 2], [1_56_75, 0.0, 3], [2_35_83, 1.0, 4]] a : List[Any] = pd.DataFrame( data_input, columns=["total_user", "total_even", "days"] ) a : Dict = Normalizer().fit_transform(data_input_df.values) # split data a : List[Any] = normalize_df[:, 2].tolist() a : Dict = normalize_df[:, 0].tolist() a : Optional[Any] = normalize_df[:, 1].tolist() # for svr (input variable = total date and total match) a : Optional[int] = normalize_df[:, [1, 2]].tolist() a : Dict = x[: len(x) - 1] a : Tuple = x[len(x) - 1 :] # for linear regression & sarimax a : str = total_date[: len(total_date) - 1] a : Optional[int] = total_user[: len(total_user) - 1] a : int = total_match[: len(total_match) - 1] a : List[Any] = total_date[len(total_date) - 1 :] a : Union[str, Any] = total_user[len(total_user) - 1 :] a : Any = total_match[len(total_match) - 1 :] # voting system with forecasting a : int = [ linear_regression_prediction( trn_date, trn_user, trn_match, tst_date, tst_match ), sarimax_predictor(trn_user, trn_match, tst_match), support_vector_regressor(x_train, x_test, trn_user), ] # check the safety of today's data a : Union[str, Any] = "" if data_safety_checker(res_vote, tst_user) else "not " print("Today's data is {not_str}safe.")
311
'''simple docstring''' import argparse from collections import defaultdict import yaml a : str = "docs/source/en/_toctree.yml" def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = defaultdict(__magic_name__ ) for doc in model_doc: counts[doc["local"]] += 1 UpperCAmelCase : List[Any] = [key for key, value in counts.items() if value > 1] UpperCAmelCase : Dict = [] for duplicate_key in duplicates: UpperCAmelCase : Union[str, Any] = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key} ) if len(__magic_name__ ) > 1: raise ValueError( F"{duplicate_key} is present several times in the documentation table of content at " "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the " "others." ) # Only add this once new_doc.append({"local": duplicate_key, "title": titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1] ) # Sort return sorted(__magic_name__ , key=lambda __magic_name__ : s["title"].lower() ) def lowercase ( __magic_name__=False ): '''simple docstring''' with open(__magic_name__ , encoding="utf-8" ) as f: UpperCAmelCase : Any = yaml.safe_load(f.read() ) # Get to the API doc UpperCAmelCase : Optional[int] = 0 while content[api_idx]["title"] != "API": api_idx += 1 UpperCAmelCase : Union[str, Any] = content[api_idx]["sections"] # Then to the model doc UpperCAmelCase : Any = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 UpperCAmelCase : str = api_doc[model_idx]["sections"] UpperCAmelCase : Any = [(idx, section) for idx, section in enumerate(__magic_name__ ) if "sections" in section] UpperCAmelCase : Optional[int] = False for idx, modality_doc in modalities_docs: UpperCAmelCase : int = modality_doc["sections"] UpperCAmelCase : int = clean_model_doc_toc(__magic_name__ ) if old_modality_doc != new_modality_doc: UpperCAmelCase : int = True if overwrite: UpperCAmelCase : Dict = new_modality_doc if diff: if overwrite: UpperCAmelCase : Any = model_doc UpperCAmelCase : Any = api_doc with open(__magic_name__ , "w" , encoding="utf-8" ) as f: f.write(yaml.dump(__magic_name__ , allow_unicode=__magic_name__ ) ) else: raise ValueError( "The model doc part of the table of content is not properly sorted, run `make style` to fix this." ) if __name__ == "__main__": a : Optional[Any] = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") a : Optional[Any] = parser.parse_args() check_model_doc(args.fix_and_overwrite)
311
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a : Tuple = { "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Tuple = [ "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "MegatronBertForCausalLM", "MegatronBertForMaskedLM", "MegatronBertForMultipleChoice", "MegatronBertForNextSentencePrediction", "MegatronBertForPreTraining", "MegatronBertForQuestionAnswering", "MegatronBertForSequenceClassification", "MegatronBertForTokenClassification", "MegatronBertModel", "MegatronBertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_megatron_bert import ( MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, MegatronBertPreTrainedModel, ) else: import sys a : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
311
'''simple docstring''' from datetime import datetime import matplotlib.pyplot as plt import torch def lowercase ( __magic_name__ ): '''simple docstring''' for param in module.parameters(): UpperCAmelCase : Any = False def lowercase ( ): '''simple docstring''' UpperCAmelCase : int = "cuda" if torch.cuda.is_available() else "cpu" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): UpperCAmelCase : int = "mps" if device == "mps": print( "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch" " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues" " with generations." ) return device def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : str = plt.imshow(__magic_name__ ) fig.axes.get_xaxis().set_visible(__magic_name__ ) fig.axes.get_yaxis().set_visible(__magic_name__ ) plt.show() def lowercase ( ): '''simple docstring''' UpperCAmelCase : str = datetime.now() UpperCAmelCase : Tuple = current_time.strftime("%H:%M:%S" ) return timestamp
311
1
'''simple docstring''' from torch import nn def lowercase ( __magic_name__ ): '''simple docstring''' if act_fn in ["swish", "silu"]: return nn.SiLU() elif act_fn == "mish": return nn.Mish() elif act_fn == "gelu": return nn.GELU() else: raise ValueError(F"Unsupported activation function: {act_fn}" )
311
'''simple docstring''' import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import ( SeqaSeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) a : str = getLogger(__name__) def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = 8 , __magic_name__ = 1024 , __magic_name__="val" , __magic_name__=None , __magic_name__=False , __magic_name__="summarization" , __magic_name__=None , __magic_name__=1 , __magic_name__ = None , __magic_name__="" , **__magic_name__ , ): '''simple docstring''' UpperCAmelCase : List[Any] = str(__magic_name__ ) assert local_rank is not None torch.distributed.init_process_group(backend="nccl" , rank=__magic_name__ ) UpperCAmelCase : List[str] = Path(__magic_name__ ) UpperCAmelCase : Dict = save_dir.joinpath(F"rank_{local_rank}_output.json" ) torch.cuda.set_device(__magic_name__ ) UpperCAmelCase : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(__magic_name__ ).cuda() if fpaa: UpperCAmelCase : int = model.half() # determine if we need to increase num_beams use_task_specific_params(__magic_name__ , __magic_name__ ) # update config with task specific params UpperCAmelCase : Dict = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk? if num_return_sequences > num_beams: UpperCAmelCase : Optional[Any] = num_return_sequences UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(__magic_name__ ) logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type. if max_source_length is None: UpperCAmelCase : Any = tokenizer.model_max_length if prefix is None: UpperCAmelCase : Tuple = prefix or getattr(model.config , "prefix" , "" ) or "" UpperCAmelCase : Dict = SeqaSeqDataset( __magic_name__ , __magic_name__ , __magic_name__ , max_target_length=1024 , type_path=__magic_name__ , n_obs=__magic_name__ , prefix=__magic_name__ , **__magic_name__ , ) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. UpperCAmelCase : int = ds.make_sortish_sampler(__magic_name__ , distributed=__magic_name__ , add_extra_examples=__magic_name__ , shuffle=__magic_name__ ) UpperCAmelCase : List[Any] = DataLoader(__magic_name__ , sampler=__magic_name__ , batch_size=__magic_name__ , collate_fn=ds.collate_fn ) UpperCAmelCase : Any = [] for batch in tqdm(__magic_name__ ): UpperCAmelCase : List[Any] = model.generate( input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=__magic_name__ , num_beams=__magic_name__ , **__magic_name__ , ) UpperCAmelCase : Optional[int] = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ ) UpperCAmelCase : int = batch["ids"] if num_return_sequences > 1: UpperCAmelCase : List[Any] = chunks(__magic_name__ , __magic_name__ ) # batch size chunks, each of size num_return_seq for i, pred in enumerate(__magic_name__ ): results.append({"pred": pred, "id": ids[i].item()} ) save_json(__magic_name__ , __magic_name__ ) return results, sampler.num_replicas def lowercase ( ): '''simple docstring''' UpperCAmelCase : str = argparse.ArgumentParser( epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" ) parser.add_argument("--data_dir" , type=__magic_name__ , help="like cnn_dm/test.source" ) parser.add_argument( "--model_name" , type=__magic_name__ , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , ) parser.add_argument("--save_dir" , type=__magic_name__ , help="where to save" , default="tmp_gen" ) parser.add_argument("--max_source_length" , type=__magic_name__ , default=__magic_name__ ) parser.add_argument( "--type_path" , type=__magic_name__ , default="test" , help="which subset to evaluate typically train/val/test" ) parser.add_argument("--task" , type=__magic_name__ , default="summarization" , help="used for task_specific_params + metrics" ) parser.add_argument("--bs" , type=__magic_name__ , default=8 , required=__magic_name__ , help="batch size" ) parser.add_argument( "--local_rank" , type=__magic_name__ , default=-1 , required=__magic_name__ , help="should be passed by distributed.launch" ) parser.add_argument( "--n_obs" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ , help="How many observations. Defaults to all." ) parser.add_argument( "--num_return_sequences" , type=__magic_name__ , default=1 , required=__magic_name__ , help="How many sequences to return" ) parser.add_argument( "--sync_timeout" , type=__magic_name__ , default=600 , required=__magic_name__ , help="How long should master process wait for other processes to finish." , ) parser.add_argument("--src_lang" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ ) parser.add_argument("--tgt_lang" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ ) parser.add_argument( "--prefix" , type=__magic_name__ , required=__magic_name__ , default=__magic_name__ , help="will be added to the begininng of src examples" ) parser.add_argument("--fp16" , action="store_true" ) parser.add_argument("--debug" , action="store_true" ) UpperCAmelCase : Union[str, Any] = time.time() UpperCAmelCase , UpperCAmelCase : Dict = parser.parse_known_args() UpperCAmelCase : Tuple = parse_numeric_n_bool_cl_kwargs(__magic_name__ ) if generate_kwargs and args.local_rank <= 0: print(F"parsed the following generate kwargs: {generate_kwargs}" ) UpperCAmelCase : Union[str, Any] = Path(args.save_dir + "_tmp" ) Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) # this handles locking. UpperCAmelCase : List[Any] = list(json_save_dir.glob("rank_*.json" ) ) if intermediate_files: raise ValueError(F"Found files at {json_save_dir} please move or remove them." ) # In theory, a node could finish and save before another node hits this. If this happens, we can address later. UpperCAmelCase : Optional[Any] = {} if args.src_lang is not None: UpperCAmelCase : List[str] = args.src_lang if args.tgt_lang is not None: UpperCAmelCase : Dict = args.tgt_lang Path(args.save_dir ).mkdir(exist_ok=__magic_name__ ) UpperCAmelCase , UpperCAmelCase : str = eval_data_dir( args.data_dir , __magic_name__ , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=__magic_name__ , **__magic_name__ , ) if args.local_rank <= 0: UpperCAmelCase : List[str] = Path(args.save_dir ) save_dir.mkdir(exist_ok=__magic_name__ ) UpperCAmelCase : str = gather_results_from_each_node(__magic_name__ , __magic_name__ , args.sync_timeout ) UpperCAmelCase : Dict = combine_partial_results(__magic_name__ ) if args.num_return_sequences > 1: UpperCAmelCase : int = save_dir.joinpath("pseudolabel_results.json" ) print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" ) save_json(__magic_name__ , __magic_name__ ) return UpperCAmelCase : Dict = Path(args.data_dir ).joinpath(args.type_path + ".target" ) with open(__magic_name__ ) as f: UpperCAmelCase : Dict = [x.rstrip() for x in f.readlines()][: len(__magic_name__ )] # Calculate metrics, save metrics, and save _generations.txt UpperCAmelCase : Optional[int] = "translation" in args.task UpperCAmelCase : str = calculate_bleu if calc_bleu else calculate_rouge UpperCAmelCase : Tuple = "bleu" if calc_bleu else "rouge" UpperCAmelCase : Dict = score_fn(__magic_name__ , __magic_name__ ) UpperCAmelCase : Any = len(__magic_name__ ) UpperCAmelCase : Union[str, Any] = time.time() - start_time UpperCAmelCase : Dict = round(runtime / metrics["n_obs"] , 4 ) UpperCAmelCase : Optional[Any] = num_replicas # TODO(@stas00): add whatever metadata to metrics UpperCAmelCase : Dict = save_dir.joinpath(F"{args.type_path}_{metric_name}.json" ) save_json(__magic_name__ , __magic_name__ , indent=__magic_name__ ) print(__magic_name__ ) write_txt_file(__magic_name__ , save_dir.joinpath(F"{args.type_path}_generations.txt" ) ) if args.debug: write_txt_file(__magic_name__ , save_dir.joinpath(F"{args.type_path}.target" ) ) else: shutil.rmtree(__magic_name__ ) def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Tuple = [] for partial_result in partial_results: records.extend(__magic_name__ ) UpperCAmelCase : Optional[Any] = sorted(__magic_name__ , key=lambda __magic_name__ : x["id"] ) UpperCAmelCase : List[Any] = [x["pred"] for x in records] return preds def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = time.time() logger.info("waiting for all nodes to finish" ) UpperCAmelCase : Union[str, Any] = None while (time.time() - start_wait) < timeout: UpperCAmelCase : Dict = list(save_dir.glob("rank_*.json" ) ) if len(__magic_name__ ) < num_replicas: continue try: # make sure all json files are fully saved UpperCAmelCase : List[str] = lmap(__magic_name__ , __magic_name__ ) return json_data except JSONDecodeError: continue else: raise TimeoutError("Rank 0 gave up on waiting for other processes" ) # Unreachable if __name__ == "__main__": # Usage for MT: run_generate()
311
1
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() a : Dict = logging.get_logger(__name__) def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[str] = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: UpperCAmelCase : Tuple = 192 UpperCAmelCase : str = 768 UpperCAmelCase : List[Any] = 12 UpperCAmelCase : List[Any] = 3 UpperCAmelCase : List[Any] = [800, 1333] UpperCAmelCase : List[str] = False elif yolos_name == "yolos_s_dWr": UpperCAmelCase : Union[str, Any] = 330 UpperCAmelCase : Union[str, Any] = 14 UpperCAmelCase : Any = 6 UpperCAmelCase : int = 1320 elif "yolos_s" in yolos_name: UpperCAmelCase : Union[str, Any] = 384 UpperCAmelCase : Dict = 1536 UpperCAmelCase : str = 12 UpperCAmelCase : List[str] = 6 elif "yolos_b" in yolos_name: UpperCAmelCase : int = [800, 1344] UpperCAmelCase : Optional[int] = 91 UpperCAmelCase : int = "huggingface/label-files" UpperCAmelCase : Union[str, Any] = "coco-detection-id2label.json" UpperCAmelCase : Optional[Any] = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="dataset" ) , "r" ) ) UpperCAmelCase : str = {int(__magic_name__ ): v for k, v in idalabel.items()} UpperCAmelCase : str = idalabel UpperCAmelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} return config def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ = False ): '''simple docstring''' for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCAmelCase : Tuple = state_dict.pop(F"blocks.{i}.attn.qkv.weight" ) UpperCAmelCase : List[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase : str = in_proj_weight[: config.hidden_size, :] UpperCAmelCase : Optional[int] = in_proj_bias[: config.hidden_size] UpperCAmelCase : Optional[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCAmelCase : int = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCAmelCase : str = in_proj_weight[-config.hidden_size :, :] UpperCAmelCase : Tuple = in_proj_bias[-config.hidden_size :] def lowercase ( __magic_name__ ): '''simple docstring''' if "backbone" in name: UpperCAmelCase : int = name.replace("backbone" , "vit" ) if "cls_token" in name: UpperCAmelCase : Dict = name.replace("cls_token" , "embeddings.cls_token" ) if "det_token" in name: UpperCAmelCase : int = name.replace("det_token" , "embeddings.detection_tokens" ) if "mid_pos_embed" in name: UpperCAmelCase : Tuple = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" ) if "pos_embed" in name: UpperCAmelCase : int = name.replace("pos_embed" , "embeddings.position_embeddings" ) if "patch_embed.proj" in name: UpperCAmelCase : str = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "blocks" in name: UpperCAmelCase : Tuple = name.replace("blocks" , "encoder.layer" ) if "attn.proj" in name: UpperCAmelCase : Tuple = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: UpperCAmelCase : Any = name.replace("attn" , "attention.self" ) if "norm1" in name: UpperCAmelCase : int = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: UpperCAmelCase : List[str] = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: UpperCAmelCase : List[str] = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: UpperCAmelCase : Dict = name.replace("mlp.fc2" , "output.dense" ) if "class_embed" in name: UpperCAmelCase : Any = name.replace("class_embed" , "class_labels_classifier" ) if "bbox_embed" in name: UpperCAmelCase : Optional[int] = name.replace("bbox_embed" , "bbox_predictor" ) if "vit.norm" in name: UpperCAmelCase : Tuple = name.replace("vit.norm" , "vit.layernorm" ) return name def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' for key in orig_state_dict.copy().keys(): UpperCAmelCase : Optional[int] = orig_state_dict.pop(__magic_name__ ) if "qkv" in key: UpperCAmelCase : str = key.split("." ) UpperCAmelCase : List[Any] = int(key_split[2] ) UpperCAmelCase : int = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: UpperCAmelCase : Optional[int] = val[:dim, :] UpperCAmelCase : Union[str, Any] = val[ dim : dim * 2, : ] UpperCAmelCase : Any = val[-dim:, :] else: UpperCAmelCase : Tuple = val[:dim] UpperCAmelCase : List[str] = val[dim : dim * 2] UpperCAmelCase : Any = val[-dim:] else: UpperCAmelCase : Union[str, Any] = val return orig_state_dict def lowercase ( ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase : Tuple = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw ) return im @torch.no_grad() def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = False ): '''simple docstring''' UpperCAmelCase : Tuple = get_yolos_config(__magic_name__ ) # load original state_dict UpperCAmelCase : int = torch.load(__magic_name__ , map_location="cpu" )["model"] # load 🤗 model UpperCAmelCase : int = YolosForObjectDetection(__magic_name__ ) model.eval() UpperCAmelCase : Dict = convert_state_dict(__magic_name__ , __magic_name__ ) model.load_state_dict(__magic_name__ ) # Check outputs on an image, prepared by YolosImageProcessor UpperCAmelCase : Dict = 800 if yolos_name != "yolos_ti" else 512 UpperCAmelCase : int = YolosImageProcessor(format="coco_detection" , size=__magic_name__ ) UpperCAmelCase : List[Any] = image_processor(images=prepare_img() , return_tensors="pt" ) UpperCAmelCase : List[str] = model(**__magic_name__ ) UpperCAmelCase , UpperCAmelCase : Optional[int] = outputs.logits, outputs.pred_boxes UpperCAmelCase , UpperCAmelCase : Optional[Any] = None, None if yolos_name == "yolos_ti": UpperCAmelCase : str = torch.tensor( [[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] ) UpperCAmelCase : Tuple = torch.tensor( [[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] ) elif yolos_name == "yolos_s_200_pre": UpperCAmelCase : Union[str, Any] = torch.tensor( [[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] ) UpperCAmelCase : List[str] = torch.tensor( [[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] ) elif yolos_name == "yolos_s_300_pre": UpperCAmelCase : List[str] = torch.tensor( [[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] ) UpperCAmelCase : Dict = torch.tensor( [[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] ) elif yolos_name == "yolos_s_dWr": UpperCAmelCase : Dict = torch.tensor( [[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] ) UpperCAmelCase : List[Any] = torch.tensor( [[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] ) elif yolos_name == "yolos_base": UpperCAmelCase : str = torch.tensor( [[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] ) UpperCAmelCase : Union[str, Any] = torch.tensor( [[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] ) else: raise ValueError(F"Unknown yolos_name: {yolos_name}" ) assert torch.allclose(logits[0, :3, :3] , __magic_name__ , atol=1e-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , __magic_name__ , atol=1e-4 ) Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) print(F"Saving model {yolos_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(__magic_name__ ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(__magic_name__ ) if push_to_hub: UpperCAmelCase : int = { "yolos_ti": "yolos-tiny", "yolos_s_200_pre": "yolos-small", "yolos_s_300_pre": "yolos-small-300", "yolos_s_dWr": "yolos-small-dwr", "yolos_base": "yolos-base", } print("Pushing to the hub..." ) UpperCAmelCase : Tuple = model_mapping[yolos_name] image_processor.push_to_hub(__magic_name__ , organization="hustvl" ) model.push_to_hub(__magic_name__ , organization="hustvl" ) if __name__ == "__main__": a : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--yolos_name", default="yolos_s_200_pre", type=str, help=( "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre'," " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'." ), ) parser.add_argument( "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) a : str = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
311
'''simple docstring''' import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() a : List[str] = logging.get_logger(__name__) a : Optional[Any] = ["model.decoder.embed_positions.weights"] def lowercase ( __magic_name__ ): '''simple docstring''' if "emb" in name: UpperCAmelCase : str = name.replace("emb" , "model.decoder.embed_tokens" ) if "transformer" in name: UpperCAmelCase : List[str] = name.replace("transformer" , "model.decoder" ) if "cross_attention" in name: UpperCAmelCase : int = name.replace("cross_attention" , "encoder_attn" ) if "linear1" in name: UpperCAmelCase : List[Any] = name.replace("linear1" , "fc1" ) if "linear2" in name: UpperCAmelCase : int = name.replace("linear2" , "fc2" ) if "norm1" in name: UpperCAmelCase : Dict = name.replace("norm1" , "self_attn_layer_norm" ) if "norm_cross" in name: UpperCAmelCase : Any = name.replace("norm_cross" , "encoder_attn_layer_norm" ) if "norm2" in name: UpperCAmelCase : Union[str, Any] = name.replace("norm2" , "final_layer_norm" ) if "out_norm" in name: UpperCAmelCase : Dict = name.replace("out_norm" , "model.decoder.layer_norm" ) if "linears" in name: UpperCAmelCase : List[Any] = name.replace("linears" , "lm_heads" ) if "condition_provider.conditioners.description.output_proj" in name: UpperCAmelCase : Any = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" ) return name def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Any = list(state_dict.keys() ) UpperCAmelCase : List[Any] = {} for key in keys: UpperCAmelCase : Any = state_dict.pop(__magic_name__ ) UpperCAmelCase : str = rename_keys(__magic_name__ ) if "in_proj_weight" in key: # split fused qkv proj UpperCAmelCase : Optional[int] = val[:hidden_size, :] UpperCAmelCase : Optional[Any] = val[hidden_size : 2 * hidden_size, :] UpperCAmelCase : Optional[Any] = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: UpperCAmelCase : str = val else: UpperCAmelCase : int = val return state_dict, enc_dec_proj_state_dict def lowercase ( __magic_name__ ): '''simple docstring''' if checkpoint == "small": # default config values UpperCAmelCase : List[Any] = 1024 UpperCAmelCase : Tuple = 24 UpperCAmelCase : Union[str, Any] = 16 elif checkpoint == "medium": UpperCAmelCase : List[Any] = 1536 UpperCAmelCase : Optional[Any] = 48 UpperCAmelCase : List[str] = 24 elif checkpoint == "large": UpperCAmelCase : List[Any] = 2048 UpperCAmelCase : str = 48 UpperCAmelCase : Optional[Any] = 32 else: raise ValueError(F"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." ) UpperCAmelCase : Tuple = MusicgenDecoderConfig( hidden_size=__magic_name__ , ffn_dim=hidden_size * 4 , num_hidden_layers=__magic_name__ , num_attention_heads=__magic_name__ , ) return config @torch.no_grad() def lowercase ( __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__="cpu" ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = MusicGen.get_pretrained(__magic_name__ , device=__magic_name__ ) UpperCAmelCase : List[str] = decoder_config_from_checkpoint(__magic_name__ ) UpperCAmelCase : Dict = fairseq_model.lm.state_dict() UpperCAmelCase , UpperCAmelCase : List[str] = rename_state_dict( __magic_name__ , hidden_size=decoder_config.hidden_size ) UpperCAmelCase : Any = TaEncoderModel.from_pretrained("t5-base" ) UpperCAmelCase : Any = EncodecModel.from_pretrained("facebook/encodec_32khz" ) UpperCAmelCase : int = MusicgenForCausalLM(__magic_name__ ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection UpperCAmelCase , UpperCAmelCase : Optional[int] = decoder.load_state_dict(__magic_name__ , strict=__magic_name__ ) for key in missing_keys.copy(): if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(__magic_name__ ) if len(__magic_name__ ) > 0: raise ValueError(F"Missing key(s) in state_dict: {missing_keys}" ) if len(__magic_name__ ) > 0: raise ValueError(F"Unexpected key(s) in state_dict: {unexpected_keys}" ) # init the composite model UpperCAmelCase : List[Any] = MusicgenForConditionalGeneration(text_encoder=__magic_name__ , audio_encoder=__magic_name__ , decoder=__magic_name__ ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(__magic_name__ ) # check we can do a forward pass UpperCAmelCase : Union[str, Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) UpperCAmelCase : Optional[Any] = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): UpperCAmelCase : str = model(input_ids=__magic_name__ , decoder_input_ids=__magic_name__ ).logits if logits.shape != (8, 1, 2048): raise ValueError("Incorrect shape for logits" ) # now construct the processor UpperCAmelCase : Dict = AutoTokenizer.from_pretrained("t5-base" ) UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" ) UpperCAmelCase : Dict = MusicgenProcessor(feature_extractor=__magic_name__ , tokenizer=__magic_name__ ) # set the appropriate bos/pad token ids UpperCAmelCase : List[Any] = 2048 UpperCAmelCase : Tuple = 2048 # set other default generation config params UpperCAmelCase : Tuple = int(30 * audio_encoder.config.frame_rate ) UpperCAmelCase : str = True UpperCAmelCase : Tuple = 3.0 if pytorch_dump_folder is not None: Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) logger.info(F"Saving model {checkpoint} to {pytorch_dump_folder}" ) model.save_pretrained(__magic_name__ ) processor.save_pretrained(__magic_name__ ) if repo_id: logger.info(F"Pushing model {checkpoint} to {repo_id}" ) model.push_to_hub(__magic_name__ ) processor.push_to_hub(__magic_name__ ) if __name__ == "__main__": a : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint", default="small", type=str, help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.", ) parser.add_argument( "--pytorch_dump_folder", required=True, default=None, type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) parser.add_argument( "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda." ) a : int = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
311
1
'''simple docstring''' import math import tensorflow as tf from packaging import version def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = tf.convert_to_tensor(__magic_name__ ) UpperCAmelCase : Dict = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) )) return x * cdf def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : int = tf.convert_to_tensor(__magic_name__ ) UpperCAmelCase : Tuple = tf.cast(math.pi , x.dtype ) UpperCAmelCase : Union[str, Any] = tf.cast(0.0_4_4_7_1_5 , x.dtype ) UpperCAmelCase : str = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(__magic_name__ , 3 )) )) return x * cdf def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Any = tf.convert_to_tensor(__magic_name__ ) return x * tf.tanh(tf.math.softplus(__magic_name__ ) ) def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : int = tf.convert_to_tensor(__magic_name__ ) UpperCAmelCase : List[str] = tf.cast(0.0_4_4_7_1_5 , x.dtype ) UpperCAmelCase : List[Any] = tf.cast(0.7_9_7_8_8_4_5_6_0_8 , x.dtype ) return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) )) def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : int = tf.convert_to_tensor(__magic_name__ ) UpperCAmelCase : List[Any] = tf.cast(1.7_0_2 , x.dtype ) return x * tf.math.sigmoid(coeff * x ) def lowercase ( __magic_name__ ): '''simple docstring''' return tf.clip_by_value(_gelu(__magic_name__ ) , -10 , 10 ) def lowercase ( __magic_name__ , __magic_name__=-1 ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase : str = tf.split(__magic_name__ , 2 , axis=__magic_name__ ) return a * tf.math.sigmoid(__magic_name__ ) if version.parse(tf.version.VERSION) >= version.parse("2.4"): def lowercase ( __magic_name__ ): '''simple docstring''' return tf.keras.activations.gelu(__magic_name__ , approximate=__magic_name__ ) a : Any = tf.keras.activations.gelu a : Optional[int] = approximate_gelu_wrap else: a : Optional[int] = _gelu a : str = _gelu_new a : List[Any] = { "gelu": gelu, "gelu_10": gelu_aa, "gelu_fast": gelu_fast, "gelu_new": gelu_new, "glu": glu, "mish": mish, "quick_gelu": quick_gelu, "relu": tf.keras.activations.relu, "sigmoid": tf.keras.activations.sigmoid, "silu": tf.keras.activations.swish, "swish": tf.keras.activations.swish, "tanh": tf.keras.activations.tanh, } def lowercase ( __magic_name__ ): '''simple docstring''' if activation_string in ACTaFN: return ACTaFN[activation_string] else: raise KeyError(F"function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}" )
311
'''simple docstring''' import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[str] = inspect.getfile(accelerate.test_utils ) UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] ) UpperCAmelCase : Optional[int] = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] ) UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] ) @require_multi_gpu def A_ ( self ): '''simple docstring''' print(f"Found {torch.cuda.device_count()} devices." ) UpperCAmelCase : Any = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case , env=os.environ.copy() ) @require_multi_gpu def A_ ( self ): '''simple docstring''' print(f"Found {torch.cuda.device_count()} devices." ) UpperCAmelCase : Tuple = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path] print(f"Command: {cmd}" ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case , env=os.environ.copy() ) @require_multi_gpu def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case , env=os.environ.copy() ) @require_multi_gpu def A_ ( self ): '''simple docstring''' print(f"Found {torch.cuda.device_count()} devices, using 2 devices only" ) UpperCAmelCase : str = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ): execute_subprocess_async(snake_case , env=os.environ.copy() ) if __name__ == "__main__": a : Union[str, Any] = Accelerator() a : str = (accelerator.state.process_index + 2, 10) a : List[str] = torch.randint(0, 10, shape).to(accelerator.device) a : Optional[int] = "" a : int = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." a : List[Any] = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." a : List[str] = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
311
1
'''simple docstring''' from __future__ import annotations class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case ): '''simple docstring''' UpperCAmelCase : str = data UpperCAmelCase : Node | None = None UpperCAmelCase : Node | None = None def lowercase ( __magic_name__ ): # In Order traversal of the tree '''simple docstring''' if tree: display(tree.left ) print(tree.data ) display(tree.right ) def lowercase ( __magic_name__ ): '''simple docstring''' return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0 def lowercase ( __magic_name__ ): '''simple docstring''' if not tree: return True if tree.left and tree.right: return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right ) else: return not tree.left and not tree.right def lowercase ( ): # Main function for testing. '''simple docstring''' UpperCAmelCase : Any = Node(1 ) UpperCAmelCase : List[str] = Node(2 ) UpperCAmelCase : Union[str, Any] = Node(3 ) UpperCAmelCase : List[str] = Node(4 ) UpperCAmelCase : Any = Node(5 ) UpperCAmelCase : Any = Node(6 ) UpperCAmelCase : Optional[int] = Node(7 ) UpperCAmelCase : Any = Node(8 ) UpperCAmelCase : int = Node(9 ) print(is_full_binary_tree(__magic_name__ ) ) print(depth_of_tree(__magic_name__ ) ) print("Tree is: " ) display(__magic_name__ ) if __name__ == "__main__": main()
311
'''simple docstring''' import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class UpperCamelCase__ : """simple docstring""" @staticmethod def A_ ( *snake_case , **snake_case ): '''simple docstring''' pass @is_pipeline_test @require_vision @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def A_ ( self , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : str = pipeline( "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" ) UpperCAmelCase : Union[str, Any] = [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ] return object_detector, examples def A_ ( self , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : List[Any] = object_detector(examples[0] , threshold=0.0 ) UpperCAmelCase : Dict = len(snake_case ) self.assertGreater(snake_case , 0 ) self.assertEqual( snake_case , [ { "score": ANY(snake_case ), "label": ANY(snake_case ), "box": {"xmin": ANY(snake_case ), "ymin": ANY(snake_case ), "xmax": ANY(snake_case ), "ymax": ANY(snake_case )}, } for i in range(snake_case ) ] , ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF" ) def A_ ( self ): '''simple docstring''' pass @require_torch def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = pipeline( "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" ) UpperCAmelCase : Optional[Any] = object_detector( "./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ {"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}}, {"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}}, {"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}}, ] , ) UpperCAmelCase : Tuple = object_detector( [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ] , threshold=0.64 , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ [ {"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}}, {"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}}, {"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}}, ] ] , ) @require_torch @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = pipeline("zero-shot-object-detection" ) UpperCAmelCase : Optional[int] = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}}, ] , ) UpperCAmelCase : Union[str, Any] = object_detector( [ { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, ] , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}}, ], [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}}, ], ] , ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF" ) def A_ ( self ): '''simple docstring''' pass @require_torch @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = 0.2 UpperCAmelCase : Union[str, Any] = pipeline("zero-shot-object-detection" ) UpperCAmelCase : str = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=snake_case , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}}, ] , ) @require_torch @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = 2 UpperCAmelCase : Optional[Any] = pipeline("zero-shot-object-detection" ) UpperCAmelCase : List[str] = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=snake_case , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, ] , )
311
1
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional import evaluate import numpy as np import torch from datasets import load_dataset from PIL import Image from torchvision.transforms import ( CenterCrop, Compose, Normalize, RandomHorizontalFlip, RandomResizedCrop, Resize, ToTensor, ) import transformers from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForImageClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version a : Dict = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt") a : List[Any] = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys()) a : Tuple = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def lowercase ( __magic_name__ ): '''simple docstring''' with open(__magic_name__ , "rb" ) as f: UpperCAmelCase : Any = Image.open(__magic_name__ ) return im.convert("RGB" ) @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[str] = field( default=lowercase__ , metadata={ "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)." } , ) SCREAMING_SNAKE_CASE__ : Optional[str] = field( default=lowercase__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) SCREAMING_SNAKE_CASE__ : Optional[str] = field(default=lowercase__ , metadata={"help": "A folder containing the training data."} ) SCREAMING_SNAKE_CASE__ : Optional[str] = field(default=lowercase__ , metadata={"help": "A folder containing the validation data."} ) SCREAMING_SNAKE_CASE__ : Optional[float] = field( default=0.1_5 , metadata={"help": "Percent to split off of train for validation."} ) SCREAMING_SNAKE_CASE__ : Optional[int] = field( default=lowercase__ , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) SCREAMING_SNAKE_CASE__ : Optional[int] = field( default=lowercase__ , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } , ) def A_ ( self ): '''simple docstring''' if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None): raise ValueError( "You must specify either a dataset name from the hub or a train and/or validation directory." ) @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ : str = field( default="google/vit-base-patch16-224-in21k" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , ) SCREAMING_SNAKE_CASE__ : Optional[str] = field( default=lowercase__ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(lowercase__ )} , ) SCREAMING_SNAKE_CASE__ : Optional[str] = field( default=lowercase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) SCREAMING_SNAKE_CASE__ : Optional[str] = field( default=lowercase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) SCREAMING_SNAKE_CASE__ : str = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) SCREAMING_SNAKE_CASE__ : str = field(default=lowercase__ , metadata={"help": "Name or path of preprocessor config."} ) SCREAMING_SNAKE_CASE__ : bool = field( default=lowercase__ , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) SCREAMING_SNAKE_CASE__ : bool = field( default=lowercase__ , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , ) def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[str] = torch.stack([example["pixel_values"] for example in examples] ) UpperCAmelCase : Optional[Any] = torch.tensor([example["labels"] for example in examples] ) return {"pixel_values": pixel_values, "labels": labels} def lowercase ( ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : str = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_image_classification" , __magic_name__ , __magic_name__ ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() UpperCAmelCase : Tuple = training_args.get_process_log_level() logger.setLevel(__magic_name__ ) transformers.utils.logging.set_verbosity(__magic_name__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(F"Training/evaluation parameters {training_args}" ) # Detecting last checkpoint. UpperCAmelCase : Dict = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: UpperCAmelCase : Tuple = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed ) # Initialize our dataset and prepare it for the 'image-classification' task. if data_args.dataset_name is not None: UpperCAmelCase : int = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="image-classification" , use_auth_token=True if model_args.use_auth_token else None , ) else: UpperCAmelCase : Union[str, Any] = {} if data_args.train_dir is not None: UpperCAmelCase : Optional[int] = os.path.join(data_args.train_dir , "**" ) if data_args.validation_dir is not None: UpperCAmelCase : List[Any] = os.path.join(data_args.validation_dir , "**" ) UpperCAmelCase : Optional[Any] = load_dataset( "imagefolder" , data_files=__magic_name__ , cache_dir=model_args.cache_dir , task="image-classification" , ) # If we don't have a validation split, split off a percentage of train as validation. UpperCAmelCase : List[str] = None if "validation" in dataset.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , __magic_name__ ) and data_args.train_val_split > 0.0: UpperCAmelCase : int = dataset["train"].train_test_split(data_args.train_val_split ) UpperCAmelCase : Union[str, Any] = split["train"] UpperCAmelCase : Any = split["test"] # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. UpperCAmelCase : str = dataset["train"].features["labels"].names UpperCAmelCase , UpperCAmelCase : List[str] = {}, {} for i, label in enumerate(__magic_name__ ): UpperCAmelCase : Tuple = str(__magic_name__ ) UpperCAmelCase : Optional[Any] = label # Load the accuracy metric from the datasets package UpperCAmelCase : List[Any] = evaluate.load("accuracy" ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(__magic_name__ ): return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids ) UpperCAmelCase : List[Any] = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(__magic_name__ ) , labelaid=__magic_name__ , idalabel=__magic_name__ , finetuning_task="image-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) UpperCAmelCase : int = AutoModelForImageClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__magic_name__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) UpperCAmelCase : List[str] = AutoImageProcessor.from_pretrained( model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Define torchvision transforms to be applied to each image. if "shortest_edge" in image_processor.size: UpperCAmelCase : List[Any] = image_processor.size["shortest_edge"] else: UpperCAmelCase : Union[str, Any] = (image_processor.size["height"], image_processor.size["width"]) UpperCAmelCase : List[Any] = Normalize(mean=image_processor.image_mean , std=image_processor.image_std ) UpperCAmelCase : Tuple = Compose( [ RandomResizedCrop(__magic_name__ ), RandomHorizontalFlip(), ToTensor(), normalize, ] ) UpperCAmelCase : Optional[Any] = Compose( [ Resize(__magic_name__ ), CenterCrop(__magic_name__ ), ToTensor(), normalize, ] ) def train_transforms(__magic_name__ ): UpperCAmelCase : Union[str, Any] = [ _train_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"] ] return example_batch def val_transforms(__magic_name__ ): UpperCAmelCase : Tuple = [_val_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]] return example_batch if training_args.do_train: if "train" not in dataset: raise ValueError("--do_train requires a train dataset" ) if data_args.max_train_samples is not None: UpperCAmelCase : Optional[int] = ( dataset["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms dataset["train"].set_transform(__magic_name__ ) if training_args.do_eval: if "validation" not in dataset: raise ValueError("--do_eval requires a validation dataset" ) if data_args.max_eval_samples is not None: UpperCAmelCase : Tuple = ( dataset["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms dataset["validation"].set_transform(__magic_name__ ) # Initalize our trainer UpperCAmelCase : str = Trainer( model=__magic_name__ , args=__magic_name__ , train_dataset=dataset["train"] if training_args.do_train else None , eval_dataset=dataset["validation"] if training_args.do_eval else None , compute_metrics=__magic_name__ , tokenizer=__magic_name__ , data_collator=__magic_name__ , ) # Training if training_args.do_train: UpperCAmelCase : Optional[int] = None if training_args.resume_from_checkpoint is not None: UpperCAmelCase : Any = training_args.resume_from_checkpoint elif last_checkpoint is not None: UpperCAmelCase : Union[str, Any] = last_checkpoint UpperCAmelCase : str = trainer.train(resume_from_checkpoint=__magic_name__ ) trainer.save_model() trainer.log_metrics("train" , train_result.metrics ) trainer.save_metrics("train" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: UpperCAmelCase : List[str] = trainer.evaluate() trainer.log_metrics("eval" , __magic_name__ ) trainer.save_metrics("eval" , __magic_name__ ) # Write model card and (optionally) push to hub UpperCAmelCase : Optional[int] = { "finetuned_from": model_args.model_name_or_path, "tasks": "image-classification", "dataset": data_args.dataset_name, "tags": ["image-classification", "vision"], } if training_args.push_to_hub: trainer.push_to_hub(**__magic_name__ ) else: trainer.create_model_card(**__magic_name__ ) if __name__ == "__main__": main()
311
'''simple docstring''' def lowercase ( __magic_name__ ): '''simple docstring''' if number > 0: raise ValueError("input must be a negative integer" ) UpperCAmelCase : List[Any] = len(bin(__magic_name__ )[3:] ) UpperCAmelCase : Optional[Any] = bin(abs(__magic_name__ ) - (1 << binary_number_length) )[3:] UpperCAmelCase : Tuple = ( ( "1" + "0" * (binary_number_length - len(__magic_name__ )) + twos_complement_number ) if number < 0 else "0" ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
311
1
'''simple docstring''' import numpy as np from numpy import ndarray from scipy.optimize import Bounds, LinearConstraint, minimize def lowercase ( __magic_name__ ): '''simple docstring''' return np.dot(__magic_name__ , __magic_name__ ) class UpperCamelCase__ : """simple docstring""" def __init__( self , *, snake_case = np.inf , snake_case = "linear" , snake_case = 0.0 , ): '''simple docstring''' UpperCAmelCase : Any = regularization UpperCAmelCase : List[Any] = gamma if kernel == "linear": UpperCAmelCase : str = self.__linear elif kernel == "rbf": if self.gamma == 0: raise ValueError("rbf kernel requires gamma" ) if not isinstance(self.gamma , (float, int) ): raise ValueError("gamma must be float or int" ) if not self.gamma > 0: raise ValueError("gamma must be > 0" ) UpperCAmelCase : int = self.__rbf # in the future, there could be a default value like in sklearn # sklear: def_gamma = 1/(n_features * X.var()) (wiki) # previously it was 1/(n_features) else: UpperCAmelCase : int = f"Unknown kernel: {kernel}" raise ValueError(snake_case ) def A_ ( self , snake_case , snake_case ): '''simple docstring''' return np.dot(snake_case , snake_case ) def A_ ( self , snake_case , snake_case ): '''simple docstring''' return np.exp(-(self.gamma * norm_squared(vectora - vectora )) ) def A_ ( self , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : int = observations UpperCAmelCase : Dict = classes # using Wolfe's Dual to calculate w. # Primal problem: minimize 1/2*norm_squared(w) # constraint: yn(w . xn + b) >= 1 # # With l a vector # Dual problem: maximize sum_n(ln) - # 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm)) # constraint: self.C >= ln >= 0 # and sum_n(ln*yn) = 0 # Then we get w using w = sum_n(ln*yn*xn) # At the end we can get b ~= mean(yn - w . xn) # # Since we use kernels, we only need l_star to calculate b # and to classify observations ((UpperCAmelCase) , ) : Optional[int] = np.shape(snake_case ) def to_minimize(snake_case ) -> float: UpperCAmelCase : str = 0 ((UpperCAmelCase) , ) : List[str] = np.shape(snake_case ) for i in range(snake_case ): for j in range(snake_case ): s += ( candidate[i] * candidate[j] * classes[i] * classes[j] * self.kernel(observations[i] , observations[j] ) ) return 1 / 2 * s - sum(snake_case ) UpperCAmelCase : List[Any] = LinearConstraint(snake_case , 0 , 0 ) UpperCAmelCase : str = Bounds(0 , self.regularization ) UpperCAmelCase : Tuple = minimize( snake_case , np.ones(snake_case ) , bounds=snake_case , constraints=[ly_contraint] ).x UpperCAmelCase : Any = l_star # calculating mean offset of separation plane to points UpperCAmelCase : Union[str, Any] = 0 for i in range(snake_case ): for j in range(snake_case ): s += classes[i] - classes[i] * self.optimum[i] * self.kernel( observations[i] , observations[j] ) UpperCAmelCase : List[str] = s / n def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : Optional[Any] = sum( self.optimum[n] * self.classes[n] * self.kernel(self.observations[n] , snake_case ) for n in range(len(self.classes ) ) ) return 1 if s + self.offset >= 0 else -1 if __name__ == "__main__": import doctest doctest.testmod()
311
'''simple docstring''' from collections import Counter import numpy as np from sklearn import datasets from sklearn.model_selection import train_test_split a : int = datasets.load_iris() a : Union[str, Any] = np.array(data["data"]) a : Optional[Any] = np.array(data["target"]) a : List[Any] = data["target_names"] a , a , a , a : Dict = train_test_split(X, y) def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' return np.linalg.norm(np.array(__magic_name__ ) - np.array(__magic_name__ ) ) def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=5 ): '''simple docstring''' UpperCAmelCase : int = zip(__magic_name__ , __magic_name__ ) # List of distances of all points from the point to be classified UpperCAmelCase : List[Any] = [] for data_point in data: UpperCAmelCase : List[str] = euclidean_distance(data_point[0] , __magic_name__ ) distances.append((distance, data_point[1]) ) # Choosing 'k' points with the least distances. UpperCAmelCase : Union[str, Any] = [i[1] for i in sorted(__magic_name__ )[:k]] # Most commonly occurring class among them # is the class into which the point is classified UpperCAmelCase : List[str] = Counter(__magic_name__ ).most_common(1 )[0][0] return classes[result] if __name__ == "__main__": print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
311
1
'''simple docstring''' import argparse from argparse import Namespace import torch from torch import nn from transformers import XGLMConfig, XGLMForCausalLM def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Any = [ "decoder.version", "decoder.output_projection.weight", "_float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(__magic_name__ , __magic_name__ ) def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase : List[str] = emb.weight.shape UpperCAmelCase : List[str] = nn.Linear(__magic_name__ , __magic_name__ , bias=__magic_name__ ) UpperCAmelCase : Optional[Any] = emb.weight.data return lin_layer def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[Any] = torch.load(__magic_name__ , map_location="cpu" ) UpperCAmelCase : int = Namespace(**checkpoint["cfg"]["model"] ) UpperCAmelCase : Union[str, Any] = checkpoint["model"] remove_ignore_keys_(__magic_name__ ) UpperCAmelCase : Any = state_dict["decoder.embed_tokens.weight"].shape[0] UpperCAmelCase : List[Any] = {key.replace("decoder" , "model" ): val for key, val in state_dict.items()} UpperCAmelCase : Any = XGLMConfig( vocab_size=__magic_name__ , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="gelu" , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , ) UpperCAmelCase : Dict = XGLMForCausalLM(__magic_name__ ) UpperCAmelCase : List[Any] = model.load_state_dict(__magic_name__ , strict=__magic_name__ ) print(__magic_name__ ) UpperCAmelCase : str = make_linear_from_emb(model.model.embed_tokens ) return model if __name__ == "__main__": a : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.") parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") a : Union[str, Any] = parser.parse_args() a : List[Any] = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
311
'''simple docstring''' def lowercase ( __magic_name__ ): '''simple docstring''' if number < 0: raise ValueError("number must not be negative" ) return number & (number - 1) == 0 if __name__ == "__main__": import doctest doctest.testmod()
311
1