Dataset schema (one row per example):

    column                    dtype    range
    code                      string   length 87 – 55.2k
    code_codestyle            int64    0 – 349
    style_context             string   length 135 – 49.1k
    style_context_codestyle   int64    0 – 349
    label                     int64    0 – 1
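The rows below interleave the two string columns with their integer fields. As a minimal sketch of how data with this schema could be loaded and inspected, assuming it is published as a Hugging Face dataset (the dataset id used here is a hypothetical placeholder):

from datasets import load_dataset

# "user/code-style-pairs" is a hypothetical id; substitute the real dataset name.
ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
print(len(row["code"]), row["code_codestyle"])       # string of length 87–55.2k, style id 0–349
print(row["style_context_codestyle"], row["label"])  # style id of the context, 0/1 match label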
"""Find the sum of all numbers that can be written as the sum of fifth powers of their digits."""

DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1_000, 1_000_000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
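A quick sanity check, not in the original file: 4150 equals 4^5 + 1^5 + 5^5 + 0^5, so it must pass the filter above.

assert digits_fifth_powers_sum(4150) == 4150  # 1024 + 1 + 3125 + 0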
code_codestyle: 298
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SegformerConfig, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() a_ :Dict = logging.get_logger(__name__) def lowercase_ (A : Optional[Any] , A : Any=False ): snake_case__ : List[Any] = OrderedDict() for key, value in state_dict.items(): if encoder_only and not key.startswith('head' ): snake_case__ : str = 'segformer.encoder.' + key if key.startswith('backbone' ): snake_case__ : str = key.replace('backbone' , 'segformer.encoder' ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 snake_case__ : Optional[int] = key[key.find('patch_embed' ) + len('patch_embed' )] snake_case__ : int = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(A )-1}''' ) if "norm" in key: snake_case__ : Optional[int] = key.replace('norm' , 'layer_norm' ) if "segformer.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 snake_case__ : Tuple = key[key.find('segformer.encoder.layer_norm' ) + len('segformer.encoder.layer_norm' )] snake_case__ : Union[str, Any] = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(A )-1}''' ) if "layer_norm1" in key: snake_case__ : List[Any] = key.replace('layer_norm1' , 'layer_norm_1' ) if "layer_norm2" in key: snake_case__ : List[Any] = key.replace('layer_norm2' , 'layer_norm_2' ) if "block" in key: # replace for example block1 by block.0 snake_case__ : List[Any] = key[key.find('block' ) + len('block' )] snake_case__ : List[Any] = key.replace(F'''block{idx}''' , F'''block.{int(A )-1}''' ) if "attn.q" in key: snake_case__ : int = key.replace('attn.q' , 'attention.self.query' ) if "attn.proj" in key: snake_case__ : str = key.replace('attn.proj' , 'attention.output.dense' ) if "attn" in key: snake_case__ : Optional[int] = key.replace('attn' , 'attention.self' ) if "fc1" in key: snake_case__ : str = key.replace('fc1' , 'dense1' ) if "fc2" in key: snake_case__ : Dict = key.replace('fc2' , 'dense2' ) if "linear_pred" in key: snake_case__ : Union[str, Any] = key.replace('linear_pred' , 'classifier' ) if "linear_fuse" in key: snake_case__ : List[str] = key.replace('linear_fuse.conv' , 'linear_fuse' ) snake_case__ : List[Any] = key.replace('linear_fuse.bn' , 'batch_norm' ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 snake_case__ : Optional[int] = key[key.find('linear_c' ) + len('linear_c' )] snake_case__ : Tuple = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(A )-1}''' ) if key.startswith('head' ): snake_case__ : Tuple = key.replace('head' , 'classifier' ) snake_case__ : Optional[int] = value return new_state_dict def lowercase_ (A : Tuple , A : Optional[int] ): # for each of the encoder blocks: for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) snake_case__ : List[str] = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.weight''' ) snake_case__ : Optional[Any] = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.bias''' ) # next, add keys and values (in that order) to the state dict snake_case__ : str = kv_weight[ : config.hidden_sizes[i], : ] snake_case__ : Dict = kv_bias[: config.hidden_sizes[i]] 
snake_case__ : List[str] = kv_weight[ config.hidden_sizes[i] :, : ] snake_case__ : List[Any] = kv_bias[ config.hidden_sizes[i] : ] def lowercase_ (): snake_case__ : Union[str, Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg' snake_case__ : Dict = Image.open(requests.get(A , stream=A ).raw ) return image @torch.no_grad() def lowercase_ (A : Any , A : Union[str, Any] , A : Optional[Any] ): snake_case__ : List[str] = SegformerConfig() snake_case__ : Dict = False # set attributes based on model_name snake_case__ : Optional[int] = 'huggingface/label-files' if "segformer" in model_name: snake_case__ : str = model_name[len('segformer.' ) : len('segformer.' ) + 2] if "ade" in model_name: snake_case__ : Optional[int] = 1_5_0 snake_case__ : int = 'ade20k-id2label.json' snake_case__ : List[Any] = (1, 1_5_0, 1_2_8, 1_2_8) elif "city" in model_name: snake_case__ : str = 1_9 snake_case__ : List[str] = 'cityscapes-id2label.json' snake_case__ : Optional[Any] = (1, 1_9, 1_2_8, 1_2_8) else: raise ValueError(F'''Model {model_name} not supported''' ) elif "mit" in model_name: snake_case__ : str = True snake_case__ : Union[str, Any] = model_name[4:6] snake_case__ : Optional[Any] = 1_0_0_0 snake_case__ : Optional[int] = 'imagenet-1k-id2label.json' snake_case__ : List[Any] = (1, 1_0_0_0) else: raise ValueError(F'''Model {model_name} not supported''' ) # set config attributes snake_case__ : str = json.load(open(hf_hub_download(A , A , repo_type='dataset' ) , 'r' ) ) snake_case__ : List[Any] = {int(A ): v for k, v in idalabel.items()} snake_case__ : Union[str, Any] = idalabel snake_case__ : Tuple = {v: k for k, v in idalabel.items()} if size == "b0": pass elif size == "b1": snake_case__ : List[Any] = [6_4, 1_2_8, 3_2_0, 5_1_2] snake_case__ : Tuple = 2_5_6 elif size == "b2": snake_case__ : List[str] = [6_4, 1_2_8, 3_2_0, 5_1_2] snake_case__ : int = 7_6_8 snake_case__ : List[Any] = [3, 4, 6, 3] elif size == "b3": snake_case__ : Optional[Any] = [6_4, 1_2_8, 3_2_0, 5_1_2] snake_case__ : int = 7_6_8 snake_case__ : Optional[Any] = [3, 4, 1_8, 3] elif size == "b4": snake_case__ : str = [6_4, 1_2_8, 3_2_0, 5_1_2] snake_case__ : Optional[Any] = 7_6_8 snake_case__ : Union[str, Any] = [3, 8, 2_7, 3] elif size == "b5": snake_case__ : List[str] = [6_4, 1_2_8, 3_2_0, 5_1_2] snake_case__ : Optional[Any] = 7_6_8 snake_case__ : Any = [3, 6, 4_0, 3] else: raise ValueError(F'''Size {size} not supported''' ) # load image processor (only resize + normalize) snake_case__ : Dict = SegformerImageProcessor( image_scale=(5_1_2, 5_1_2) , keep_ratio=A , align=A , do_random_crop=A ) # prepare image snake_case__ : List[str] = prepare_img() snake_case__ : Dict = image_processor(images=A , return_tensors='pt' ).pixel_values logger.info(F'''Converting model {model_name}...''' ) # load original state dict if encoder_only: snake_case__ : Tuple = torch.load(A , map_location=torch.device('cpu' ) ) else: snake_case__ : int = torch.load(A , map_location=torch.device('cpu' ) )['state_dict'] # rename keys snake_case__ : List[Any] = rename_keys(A , encoder_only=A ) if not encoder_only: del state_dict["decode_head.conv_seg.weight"] del state_dict["decode_head.conv_seg.bias"] # key and value matrices need special treatment read_in_k_v(A , A ) # create HuggingFace model and load state dict if encoder_only: snake_case__ : str = False snake_case__ : List[Any] = SegformerForImageClassification(A ) else: snake_case__ : Dict = SegformerForSemanticSegmentation(A ) model.load_state_dict(A ) model.eval() # forward pass snake_case__ : int = model(A ) 
snake_case__ : Any = outputs.logits # set expected_slice based on model name # ADE20k checkpoints if model_name == "segformer.b0.512x512.ade.160k": snake_case__ : Dict = torch.tensor( [ [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]], [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]], [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]], ] ) elif model_name == "segformer.b1.512x512.ade.160k": snake_case__ : Optional[int] = torch.tensor( [ [[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]], [[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]], [[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]], ] ) elif model_name == "segformer.b2.512x512.ade.160k": snake_case__ : List[Any] = torch.tensor( [ [[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]], [[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]], [[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]], ] ) elif model_name == "segformer.b3.512x512.ade.160k": snake_case__ : Union[str, Any] = torch.tensor( [ [[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]], [[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]], [[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]], ] ) elif model_name == "segformer.b4.512x512.ade.160k": snake_case__ : Dict = torch.tensor( [ [[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]], [[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]], [[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]], ] ) elif model_name == "segformer.b5.640x640.ade.160k": snake_case__ : List[Any] = torch.tensor( [ [[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]], [[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]], [[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]], ] ) # Cityscapes checkpoints elif model_name == "segformer.b0.1024x1024.city.160k": snake_case__ : str = torch.tensor( [ [[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]], [[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]], [[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]], ] ) elif model_name == "segformer.b0.512x1024.city.160k": snake_case__ : Tuple = torch.tensor( [ [[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]], [[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]], [[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]], ] ) elif model_name == "segformer.b0.640x1280.city.160k": snake_case__ : Any = torch.tensor( [ [ [-1.1_372e01, -1.2_787e01, -1.3_477e01], [-1.2_536e01, -1.4_194e01, -1.4_409e01], [-1.3_217e01, -1.4_888e01, -1.5_327e01], ], [ [-1.4_791e01, -1.7_122e01, -1.8_277e01], [-1.7_163e01, -1.9_192e01, -1.9_533e01], [-1.7_897e01, -1.9_991e01, -2.0_315e01], 
], [ [7.6_723e-01, 4.1_921e-01, -7.7_878e-02], [4.7_772e-01, 9.5_557e-03, -2.8_082e-01], [3.6_032e-01, -2.4_826e-01, -5.1_168e-01], ], ] ) elif model_name == "segformer.b0.768x768.city.160k": snake_case__ : Optional[int] = torch.tensor( [ [[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]], [[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]], [[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]], ] ) elif model_name == "segformer.b1.1024x1024.city.160k": snake_case__ : Union[str, Any] = torch.tensor( [ [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]], [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]], [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]], ] ) elif model_name == "segformer.b2.1024x1024.city.160k": snake_case__ : List[str] = torch.tensor( [ [[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]], [[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]], [[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]], ] ) elif model_name == "segformer.b3.1024x1024.city.160k": snake_case__ : List[Any] = torch.tensor( [ [[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]], [[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]], [[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]], ] ) elif model_name == "segformer.b4.1024x1024.city.160k": snake_case__ : str = torch.tensor( [ [[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]], [[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]], [[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]], ] ) elif model_name == "segformer.b5.1024x1024.city.160k": snake_case__ : List[str] = torch.tensor( [ [[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]], [[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]], [[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]], ] ) else: snake_case__ : Tuple = logits.argmax(-1 ).item() print('Predicted class:' , model.config.idalabel[predicted_class_idx] ) # verify logits if not encoder_only: assert logits.shape == expected_shape assert torch.allclose(logits[0, :3, :3, :3] , A , atol=1e-2 ) # finally, save model and image processor logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(A ).mkdir(exist_ok=A ) model.save_pretrained(A ) image_processor.save_pretrained(A ) if __name__ == "__main__": a_ :Optional[int] = argparse.ArgumentParser() parser.add_argument( "--model_name", default="segformer.b0.512x512.ade.160k", type=str, help="Name of the model you'd like to convert.", ) parser.add_argument( "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) a_ :Union[str, Any] = parser.parse_args() convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
style_context_codestyle: 277
label: 0
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"


class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
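A brief usage sketch for the class above, using one of the checkpoint names from its own pretrained map (the exact token ids depend on the downloaded vocabulary):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("moussaKam/barthez")  # resolves to BarthezTokenizerFast
encoded = tokenizer("Transformers, qu'est-ce que c'est ?")
# build_inputs_with_special_tokens wraps the ids as <s> ... </s>
print(encoded["input_ids"])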
code_codestyle: 26
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() a_ :List[Any] = logging.get_logger(__name__) a_ :List[Any] = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "adapter_layer": "encoder.layers.*.adapter_layer", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", "pooling_layer.linear": "projector", "pooling_layer.projection": "classifier", } a_ :List[Any] = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", "projector", "classifier", ] def lowercase_ (A : Dict ): snake_case__ : Optional[Any] = {} with open(A , 'r' ) as file: for line_number, line in enumerate(A ): snake_case__ : Dict = line.strip() if line: snake_case__ : int = line.split() snake_case__ : List[str] = line_number snake_case__ : Dict = words[0] snake_case__ : Optional[Any] = value return result def lowercase_ (A : int , A : int , A : Optional[int] , A : Optional[Any] , A : Tuple ): for attribute in key.split('.' ): snake_case__ : Optional[int] = getattr(A , A ) snake_case__ : Union[str, Any] = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(A ): snake_case__ : List[str] = PARAM_MAPPING[full_name.split('.' )[-1]] snake_case__ : Dict = 'param' if weight_type is not None and weight_type != "param": snake_case__ : Union[str, Any] = getattr(A , A ).shape elif weight_type is not None and weight_type == "param": snake_case__ : Optional[int] = hf_pointer for attribute in hf_param_name.split('.' ): snake_case__ : Optional[Any] = getattr(A , A ) snake_case__ : Dict = shape_pointer.shape # let's reduce dimension snake_case__ : List[Any] = value[0] else: snake_case__ : Union[str, Any] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": snake_case__ : Any = value elif weight_type == "weight_g": snake_case__ : List[Any] = value elif weight_type == "weight_v": snake_case__ : Any = value elif weight_type == "bias": snake_case__ : List[Any] = value elif weight_type == "param": for attribute in hf_param_name.split('.' ): snake_case__ : int = getattr(A , A ) snake_case__ : Optional[int] = value else: snake_case__ : Optional[Any] = value logger.info(F'''{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def lowercase_ (A : Tuple , A : List[Any] , A : int , A : str , A : Tuple ): snake_case__ : Optional[int] = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(A ): snake_case__ : List[str] = PARAM_MAPPING[full_name.split('.' )[-1]] snake_case__ : str = 'param' if weight_type is not None and weight_type != "param": snake_case__ : int = '.'.join([key, weight_type] ) elif weight_type is not None and weight_type == "param": snake_case__ : Any = '.'.join([key, hf_param_name] ) else: snake_case__ : Dict = key snake_case__ : List[str] = value if 'lm_head' in full_key else value[0] a_ :List[str] = { "W_a": "linear_1.weight", "W_b": "linear_2.weight", "b_a": "linear_1.bias", "b_b": "linear_2.bias", "ln_W": "norm.weight", "ln_b": "norm.bias", } def lowercase_ (A : str , A : Optional[Any] , A : Optional[Any]=None , A : List[str]=None ): snake_case__ : Optional[int] = False for key, mapped_key in MAPPING.items(): snake_case__ : Tuple = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: snake_case__ : Optional[int] = True if "*" in mapped_key: snake_case__ : List[Any] = name.split(A )[0].split('.' )[-2] snake_case__ : Union[str, Any] = mapped_key.replace('*' , A ) if "weight_g" in name: snake_case__ : Tuple = 'weight_g' elif "weight_v" in name: snake_case__ : List[str] = 'weight_v' elif "bias" in name: snake_case__ : Dict = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj snake_case__ : Optional[int] = 'weight' else: snake_case__ : str = None if hf_dict is not None: rename_dict(A , A , A , A , A ) else: set_recursively(A , A , A , A , A ) return is_used return is_used def lowercase_ (A : Optional[Any] , A : Dict , A : Optional[int] ): snake_case__ : Dict = [] snake_case__ : Tuple = fairseq_model.state_dict() snake_case__ : str = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): snake_case__ : str = False if "conv_layers" in name: load_conv_layer( A , A , A , A , hf_model.config.feat_extract_norm == 'group' , ) snake_case__ : Any = True else: snake_case__ : Dict = load_wavaveca_layer(A , A , A ) if not is_used: unused_weights.append(A ) logger.warning(F'''Unused weights: {unused_weights}''' ) def lowercase_ (A : Dict , A : Optional[Any] , A : Tuple , A : str , A : List[str] ): snake_case__ : List[Any] = full_name.split('conv_layers.' )[-1] snake_case__ : List[str] = name.split('.' 
) snake_case__ : List[Any] = int(items[0] ) snake_case__ : str = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) snake_case__ : Any = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) snake_case__ : str = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) snake_case__ : str = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) snake_case__ : int = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(A ) @torch.no_grad() def lowercase_ (A : Union[str, Any] , A : str , A : Tuple=None , A : List[str]=None , A : Any=True , A : Optional[int]=False ): if config_path is not None: snake_case__ : List[Any] = WavaVecaConfig.from_pretrained(A ) else: snake_case__ : List[Any] = WavaVecaConfig() if is_seq_class: snake_case__ : Dict = read_txt_into_dict(A ) snake_case__ : Any = idalabel snake_case__ : Union[str, Any] = WavaVecaForSequenceClassification(A ) snake_case__ : Any = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=A , return_attention_mask=A , ) feature_extractor.save_pretrained(A ) elif is_finetuned: if dict_path: snake_case__ : str = Dictionary.load(A ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq snake_case__ : List[str] = target_dict.pad_index snake_case__ : Optional[int] = target_dict.bos_index snake_case__ : Optional[int] = target_dict.eos_index snake_case__ : List[Any] = len(target_dict.symbols ) snake_case__ : str = os.path.join(A , 'vocab.json' ) if not os.path.isdir(A ): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(A ) ) return os.makedirs(A , exist_ok=A ) snake_case__ : Optional[Any] = target_dict.indices # fairseq has the <pad> and <s> switched snake_case__ : Optional[Any] = 0 snake_case__ : Union[str, Any] = 1 with open(A , 'w' , encoding='utf-8' ) as vocab_handle: json.dump(A , A ) snake_case__ : List[Any] = WavaVecaCTCTokenizer( A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=A , ) snake_case__ : str = True if config.feat_extract_norm == 'layer' else False snake_case__ : Optional[Any] = 
WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=A , return_attention_mask=A , ) snake_case__ : Union[str, Any] = WavaVecaProcessor(feature_extractor=A , tokenizer=A ) processor.save_pretrained(A ) snake_case__ : str = WavaVecaForCTC(A ) else: snake_case__ : int = WavaVecaForPreTraining(A ) if is_finetuned or is_seq_class: snake_case__ , snake_case__ , snake_case__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) else: snake_case__ : Tuple = argparse.Namespace(task='audio_pretraining' ) snake_case__ : str = fairseq.tasks.setup_task(A ) snake_case__ , snake_case__ , snake_case__ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=A ) snake_case__ : List[Any] = model[0].eval() recursively_load_weights(A , A , not is_finetuned ) hf_wavavec.save_pretrained(A ) if __name__ == "__main__": a_ :List[Any] = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) parser.add_argument( "--is_seq_class", action="store_true", help="Whether the model to convert is a fine-tuned sequence classification model or not", ) a_ :str = parser.parse_args() a_ :Tuple = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
style_context_codestyle: 277
label: 0
import warnings

from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of"
            " Transformers. Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
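A minimal sketch of the shim's behavior, assuming a transformers install where this module lives at its usual path:

import warnings

from transformers import LayoutLMv2FeatureExtractor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = LayoutLMv2FeatureExtractor()  # still constructs a LayoutLMv2ImageProcessor
assert any("deprecated" in str(w.message) for w in caught)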
code_codestyle: 339
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo a_ :Any = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n" a_ :List[str] = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n" a_ :List[str] = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 
'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 
'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case__ ( datasets.Metric ): """simple docstring""" def lowercase_ ( self : str ) ->MetricInfo: return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string', id='token' ), id='sequence' ), 'references': datasets.Sequence( datasets.Sequence(datasets.Value('string', id='token' ), id='sequence' ), id='references' ), } ), ) def lowercase_ ( self : str, _snake_case : List[List[List[str]]], _snake_case : List[List[str]], _snake_case : int = 1, _snake_case : int = 4, ) ->Dict[str, float]: return { "google_bleu": gleu_score.corpus_gleu( list_of_references=_snake_case, hypotheses=_snake_case, min_len=_snake_case, max_len=_snake_case ) }
style_context_codestyle: 277
label: 0
from collections import UserDict
from typing import Union

import numpy as np
import requests

from ..utils import add_end_docstrings, logging
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline


logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
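A minimal usage sketch for the pipeline above; the checkpoint id and audio file name are illustrative assumptions, not taken from this file:

from transformers import pipeline

classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
preds = classifier(
    "dog_bark.wav",  # a URL or a 1-D numpy array is also accepted (see preprocess above)
    candidate_labels=["dog barking", "vacuum cleaner"],
)
print(preds)  # [{'score': ..., 'label': ...}, ...] sorted by descending score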
code_codestyle: 178
from math import factorial


def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Probability of exactly `successes` successes in `trials` Bernoulli trials with success probability `prob`."""
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in the range (0, 1)")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / (k! * (n - k)!)
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trials")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
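Checking the printed example by hand (not in the original file): C(4, 2) = 6, so the probability is 6 * 0.75^2 * 0.25^2 = 0.2109375.

assert binomial_distribution(2, 4, 0.75) == 6 * 0.75**2 * 0.25**2  # 0.2109375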
style_context_codestyle: 277
label: 0
"""simple docstring""" import argparse import json import os from pathlib import Path import requests import torch from transformers import JukeboxConfig, JukeboxModel from transformers.utils import logging logging.set_verbosity_info() __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = "https://openaipublic.azureedge.net/jukebox/models/" __UpperCamelCase = { "jukebox-1b-lyrics": [ "5b/vqvae.pth.tar", "5b/prior_level_0.pth.tar", "5b/prior_level_1.pth.tar", "1b_lyrics/prior_level_2.pth.tar", ], "jukebox-5b-lyrics": [ "5b/vqvae.pth.tar", "5b/prior_level_0.pth.tar", "5b/prior_level_1.pth.tar", "5b_lyrics/prior_level_2.pth.tar", ], } def lowercase (SCREAMING_SNAKE_CASE_ : int ) -> int: if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10: SCREAMING_SNAKE_CASE = key.replace('.model.1.bias' , '.conv1d_1.bias' ) elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10: SCREAMING_SNAKE_CASE = key.replace('.model.1.weight' , '.conv1d_1.weight' ) elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10: SCREAMING_SNAKE_CASE = key.replace('.model.3.bias' , '.conv1d_2.bias' ) elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10: SCREAMING_SNAKE_CASE = key.replace('.model.3.weight' , '.conv1d_2.weight' ) if "conditioner_blocks.0." in key: SCREAMING_SNAKE_CASE = key.replace('conditioner_blocks.0' , 'conditioner_blocks' ) if "prime_prior" in key: SCREAMING_SNAKE_CASE = key.replace('prime_prior' , 'encoder' ) if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key: SCREAMING_SNAKE_CASE = key.replace('.emb.' , '.' ) if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook return key.replace('.k' , '.codebook' ) if "y_emb." in key: return key.replace('y_emb.' , 'metadata_embedding.' ) if "x_emb.emb." 
in key: SCREAMING_SNAKE_CASE = key.replace('0.x_emb.emb' , 'embed_tokens' ) if "prime_state_ln" in key: return key.replace('prime_state_ln' , 'encoder.final_layer_norm' ) if ".ln" in key: return key.replace('.ln' , '.layer_norm' ) if "_ln" in key: return key.replace('_ln' , '_layer_norm' ) if "prime_state_proj" in key: return key.replace('prime_state_proj' , 'encoder.proj_in' ) if "prime_x_out" in key: return key.replace('prime_x_out' , 'encoder.lm_head' ) if "prior.x_out" in key: return key.replace('x_out' , 'fc_proj_out' ) if "x_emb" in key: return key.replace('x_emb' , 'embed_tokens' ) return key def lowercase (SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> Optional[Any]: SCREAMING_SNAKE_CASE = {} import re SCREAMING_SNAKE_CASE = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' ) SCREAMING_SNAKE_CASE = re.compile( R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' ) SCREAMING_SNAKE_CASE = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' ) SCREAMING_SNAKE_CASE = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' ) SCREAMING_SNAKE_CASE = re.compile( R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' ) SCREAMING_SNAKE_CASE = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' ) SCREAMING_SNAKE_CASE = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' ) SCREAMING_SNAKE_CASE = re.compile( R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' ) SCREAMING_SNAKE_CASE = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' ) for original_key, value in state_dict.items(): # rename vqvae.encoder keys if re_encoder_block_conv_in.fullmatch(SCREAMING_SNAKE_CASE_ ): SCREAMING_SNAKE_CASE = re_encoder_block_conv_in.match(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE = regex_match.groups() SCREAMING_SNAKE_CASE = int(groups[2] ) * 2 + int(groups[3] ) SCREAMING_SNAKE_CASE = F'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}' SCREAMING_SNAKE_CASE = re_encoder_block_conv_in.sub(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif re_encoder_block_resnet.fullmatch(SCREAMING_SNAKE_CASE_ ): SCREAMING_SNAKE_CASE = re_encoder_block_resnet.match(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE = regex_match.groups() SCREAMING_SNAKE_CASE = int(groups[2] ) * 2 + int(groups[3] ) SCREAMING_SNAKE_CASE = {'1': 1, '3': 2}[groups[-2]] SCREAMING_SNAKE_CASE = F'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.' 
SCREAMING_SNAKE_CASE = F'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}' SCREAMING_SNAKE_CASE = prefix + resnet_block SCREAMING_SNAKE_CASE = re_encoder_block_resnet.sub(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif re_encoder_block_proj_out.fullmatch(SCREAMING_SNAKE_CASE_ ): SCREAMING_SNAKE_CASE = re_encoder_block_proj_out.match(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE = regex_match.groups() SCREAMING_SNAKE_CASE = F'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}' SCREAMING_SNAKE_CASE = re_encoder_block_proj_out.sub(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # rename vqvae.decoder keys elif re_decoder_block_conv_out.fullmatch(SCREAMING_SNAKE_CASE_ ): SCREAMING_SNAKE_CASE = re_decoder_block_conv_out.match(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE = regex_match.groups() SCREAMING_SNAKE_CASE = int(groups[2] ) * 2 + int(groups[3] ) - 2 SCREAMING_SNAKE_CASE = F'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}' SCREAMING_SNAKE_CASE = re_decoder_block_conv_out.sub(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif re_decoder_block_resnet.fullmatch(SCREAMING_SNAKE_CASE_ ): SCREAMING_SNAKE_CASE = re_decoder_block_resnet.match(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE = regex_match.groups() SCREAMING_SNAKE_CASE = int(groups[2] ) * 2 + int(groups[3] ) - 2 SCREAMING_SNAKE_CASE = {'1': 1, '3': 2}[groups[-2]] SCREAMING_SNAKE_CASE = F'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.' SCREAMING_SNAKE_CASE = F'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}' SCREAMING_SNAKE_CASE = prefix + resnet_block SCREAMING_SNAKE_CASE = re_decoder_block_resnet.sub(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif re_decoder_block_proj_in.fullmatch(SCREAMING_SNAKE_CASE_ ): SCREAMING_SNAKE_CASE = re_decoder_block_proj_in.match(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE = regex_match.groups() SCREAMING_SNAKE_CASE = F'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}' SCREAMING_SNAKE_CASE = re_decoder_block_proj_in.sub(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # rename prior cond.model to upsampler.upsample_block and resnet elif re_prior_cond_conv_out.fullmatch(SCREAMING_SNAKE_CASE_ ): SCREAMING_SNAKE_CASE = re_prior_cond_conv_out.match(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE = regex_match.groups() SCREAMING_SNAKE_CASE = int(groups[1] ) * 2 + int(groups[2] ) - 2 SCREAMING_SNAKE_CASE = F'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}' SCREAMING_SNAKE_CASE = re_prior_cond_conv_out.sub(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif re_prior_cond_resnet.fullmatch(SCREAMING_SNAKE_CASE_ ): SCREAMING_SNAKE_CASE = re_prior_cond_resnet.match(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE = regex_match.groups() SCREAMING_SNAKE_CASE = int(groups[1] ) * 2 + int(groups[2] ) - 2 SCREAMING_SNAKE_CASE = {'1': 1, '3': 2}[groups[-2]] SCREAMING_SNAKE_CASE = F'conditioner_blocks.upsampler.upsample_block.{block_index}.' 
SCREAMING_SNAKE_CASE = F'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}' SCREAMING_SNAKE_CASE = prefix + resnet_block SCREAMING_SNAKE_CASE = re_prior_cond_resnet.sub(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif re_prior_cond_proj_in.fullmatch(SCREAMING_SNAKE_CASE_ ): SCREAMING_SNAKE_CASE = re_prior_cond_proj_in.match(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE = regex_match.groups() SCREAMING_SNAKE_CASE = F'conditioner_blocks.upsampler.proj_in.{groups[-1]}' SCREAMING_SNAKE_CASE = re_prior_cond_proj_in.sub(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # keep original key else: SCREAMING_SNAKE_CASE = original_key SCREAMING_SNAKE_CASE = replace_key(SCREAMING_SNAKE_CASE_ ) if F'{key_prefix}.{key}' not in model_state_dict or key is None: print(F'failed converting {original_key} to {key}, does not match' ) # handle missmatched shape elif value.shape != model_state_dict[F'{key_prefix}.{key}'].shape: SCREAMING_SNAKE_CASE = model_state_dict[F'{key_prefix}.{key}'] print(F'{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match' ) SCREAMING_SNAKE_CASE = original_key SCREAMING_SNAKE_CASE = original_key SCREAMING_SNAKE_CASE = value return new_dict @torch.no_grad() def lowercase (SCREAMING_SNAKE_CASE_ : List[Any]=None , SCREAMING_SNAKE_CASE_ : Any=None ) -> Dict: for file in MODEL_MAPPING[model_name]: if not os.path.isfile(F'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' ): SCREAMING_SNAKE_CASE = requests.get(F'{PREFIX}{file}' , allow_redirects=SCREAMING_SNAKE_CASE_ ) os.makedirs(F'{pytorch_dump_folder_path}/' , exist_ok=SCREAMING_SNAKE_CASE_ ) open(F'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' , 'wb' ).write(r.content ) SCREAMING_SNAKE_CASE = MODEL_MAPPING[model_name.split('/' )[-1]] SCREAMING_SNAKE_CASE = JukeboxConfig.from_pretrained(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE = JukeboxModel(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = {} for i, dict_name in enumerate(SCREAMING_SNAKE_CASE_ ): SCREAMING_SNAKE_CASE = torch.load(F'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}' )['model'] SCREAMING_SNAKE_CASE = {} for k in old_dic.keys(): if k.endswith('.b' ): SCREAMING_SNAKE_CASE = old_dic[k] elif k.endswith('.w' ): SCREAMING_SNAKE_CASE = old_dic[k] elif "level_2" not in dict_name and "cond.model." 
in k: SCREAMING_SNAKE_CASE = old_dic[k] else: SCREAMING_SNAKE_CASE = old_dic[k] SCREAMING_SNAKE_CASE = 'vqvae' if i == 0 else F'priors.{3 - i}' SCREAMING_SNAKE_CASE = fix_jukebox_keys(SCREAMING_SNAKE_CASE_ , model.state_dict() , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) weight_dict.append(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE = weight_dict.pop(0 ) model.vqvae.load_state_dict(SCREAMING_SNAKE_CASE_ ) for i in range(len(SCREAMING_SNAKE_CASE_ ) ): model.priors[i].load_state_dict(weight_dict[2 - i] ) Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ ) with open(F'{pytorch_dump_folder_path}/mapping.json' , 'w' ) as txtfile: json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) print(F'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) return weight_dict if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''jukebox-5b-lyrics''', type=str, help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''jukebox-5b-lyrics-converted''', type=str, help='''Path to the output PyTorch model directory.''', ) __UpperCamelCase = parser.parse_args() convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
code_codestyle: 113
from collections import UserDict
from typing import Union

import numpy as np
import requests

from ..utils import add_end_docstrings, logging
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline


logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
277
0
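A minimal usage sketch for the zero-shot audio classification pipeline above. The checkpoint id and the local audio path are assumptions for illustration, not part of the source record:

# Hypothetical usage; "laion/clap-htsat-unfused" and "dog_bark.wav" are assumed.
from transformers import pipeline

classifier = pipeline(
    task="zero-shot-audio-classification",
    model="laion/clap-htsat-unfused",  # assumed CLAP checkpoint
)
result = classifier(
    "dog_bark.wav",  # assumed local audio file, single channel
    candidate_labels=["a dog barking", "a vacuum cleaner"],
)
# `result` is a list of {"score": float, "label": str}, sorted by descending score
print(result)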
'''simple docstring''' import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / '''utils''')) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class _lowercase ( unittest.TestCase ): '''simple docstring''' def a ( self : Dict ) -> Any: # A mock response for an HTTP head request to emulate server down __lowerCAmelCase = mock.Mock() __lowerCAmelCase = 5_00 __lowerCAmelCase = {} __lowerCAmelCase = HTTPError __lowerCAmelCase = {} # Download this model to make sure it's in the cache. __lowerCAmelCase = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("""requests.Session.request""" , return_value=_snake_case ) as mock_head: __lowerCAmelCase = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def a ( self : Dict ) -> Optional[int]: # A mock response for an HTTP head request to emulate server down __lowerCAmelCase = mock.Mock() __lowerCAmelCase = 5_00 __lowerCAmelCase = {} __lowerCAmelCase = HTTPError __lowerCAmelCase = {} # Download this model to make sure it's in the cache. __lowerCAmelCase = GPTaTokenizerFast.from_pretrained("""gpt2""" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("""requests.Session.request""" , return_value=_snake_case ) as mock_head: __lowerCAmelCase = GPTaTokenizerFast.from_pretrained("""gpt2""" ) # This check we did call the fake head request mock_head.assert_called() def a ( self : List[Any] ) -> Dict: # This test is for deprecated behavior and can be removed in v5 try: __lowerCAmelCase = tempfile.mktemp() with open(_snake_case , """wb""" ) as f: http_get("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" , _snake_case ) __lowerCAmelCase = AlbertTokenizer.from_pretrained(_snake_case ) finally: os.remove(_snake_case ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile("""tokenizer.json""" ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. return try: with open("""tokenizer.json""" , """wb""" ) as f: http_get("""https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json""" , _snake_case ) __lowerCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size , 10_00 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. 
finally: os.remove("""tokenizer.json""" ) def a ( self : Union[str, Any] ) -> Union[str, Any]: # This test is for deprecated behavior and can be removed in v5 __lowerCAmelCase = AlbertTokenizer.from_pretrained("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" ) @is_staging_test class _lowercase ( unittest.TestCase ): '''simple docstring''' _SCREAMING_SNAKE_CASE : Optional[int] = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""] @classmethod def a ( cls : Any ) -> List[Any]: __lowerCAmelCase = TOKEN HfFolder.save_token(_snake_case ) @classmethod def a ( cls : Any ) -> int: try: delete_repo(token=cls._token , repo_id="""test-tokenizer""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-tokenizer-org""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""test-dynamic-tokenizer""" ) except HTTPError: pass def a ( self : Optional[int] ) -> Optional[int]: with tempfile.TemporaryDirectory() as tmp_dir: __lowerCAmelCase = os.path.join(_snake_case , """vocab.txt""" ) with open(_snake_case , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) __lowerCAmelCase = BertTokenizer(_snake_case ) tokenizer.push_to_hub("""test-tokenizer""" , use_auth_token=self._token ) __lowerCAmelCase = BertTokenizer.from_pretrained(f"""{USER}/test-tokenizer""" ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id="""test-tokenizer""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_snake_case , repo_id="""test-tokenizer""" , push_to_hub=_snake_case , use_auth_token=self._token ) __lowerCAmelCase = BertTokenizer.from_pretrained(f"""{USER}/test-tokenizer""" ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) def a ( self : Tuple ) -> Union[str, Any]: with tempfile.TemporaryDirectory() as tmp_dir: __lowerCAmelCase = os.path.join(_snake_case , """vocab.txt""" ) with open(_snake_case , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) __lowerCAmelCase = BertTokenizer(_snake_case ) tokenizer.push_to_hub("""valid_org/test-tokenizer-org""" , use_auth_token=self._token ) __lowerCAmelCase = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id="""valid_org/test-tokenizer-org""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( _snake_case , repo_id="""valid_org/test-tokenizer-org""" , push_to_hub=_snake_case , use_auth_token=self._token ) __lowerCAmelCase = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) @require_tokenizers def a ( self : Optional[int] ) -> List[str]: CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: __lowerCAmelCase = os.path.join(_snake_case , """vocab.txt""" ) with open(_snake_case , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) __lowerCAmelCase = CustomTokenizer(_snake_case ) # No fast custom tokenizer tokenizer.push_to_hub("""test-dynamic-tokenizer""" , use_auth_token=self._token ) __lowerCAmelCase = 
AutoTokenizer.from_pretrained(f"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=_snake_case ) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizer""" ) # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: __lowerCAmelCase = os.path.join(_snake_case , """vocab.txt""" ) with open(_snake_case , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) __lowerCAmelCase = BertTokenizerFast.from_pretrained(_snake_case ) bert_tokenizer.save_pretrained(_snake_case ) __lowerCAmelCase = CustomTokenizerFast.from_pretrained(_snake_case ) tokenizer.push_to_hub("""test-dynamic-tokenizer""" , use_auth_token=self._token ) __lowerCAmelCase = AutoTokenizer.from_pretrained(f"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=_snake_case ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizerFast""" ) __lowerCAmelCase = AutoTokenizer.from_pretrained( f"""{USER}/test-dynamic-tokenizer""" , use_fast=_snake_case , trust_remote_code=_snake_case ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizer""" ) class _lowercase ( unittest.TestCase ): '''simple docstring''' def a ( self : Optional[Any] ) -> Any: __lowerCAmelCase = Trie() trie.add("""Hello 友達""" ) self.assertEqual(trie.data , {"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {""" """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} ) trie.add("""Hello""" ) trie.data self.assertEqual(trie.data , {"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {"""""": 1, """ """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} ) def a ( self : Tuple ) -> Any: __lowerCAmelCase = Trie() self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) , ["""[CLS] This is a extra_id_100"""] ) trie.add("""[CLS]""" ) trie.add("""extra_id_1""" ) trie.add("""extra_id_100""" ) self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) , ["""[CLS]""", """ This is a """, """extra_id_100"""] ) def a ( self : Optional[Any] ) -> Union[str, Any]: __lowerCAmelCase = Trie() trie.add("""A""" ) self.assertEqual(trie.split("""ABC""" ) , ["""A""", """BC"""] ) self.assertEqual(trie.split("""BCA""" ) , ["""BC""", """A"""] ) def a ( self : Union[str, Any] ) -> Dict: __lowerCAmelCase = Trie() trie.add("""TOKEN]""" ) trie.add("""[SPECIAL_TOKEN]""" ) self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) , ["""This is something """, """[SPECIAL_TOKEN]"""] ) def a ( self : Optional[int] ) -> List[Any]: __lowerCAmelCase = Trie() trie.add("""A""" ) trie.add("""P""" ) trie.add("""[SPECIAL_TOKEN]""" ) self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) , ["""This is something """, """[SPECIAL_TOKEN]"""] ) def a ( self : List[str] ) -> List[Any]: __lowerCAmelCase = Trie() trie.add("""AB""" ) trie.add("""B""" ) trie.add("""C""" ) self.assertEqual(trie.split("""ABC""" ) , ["""AB""", """C"""] ) def a ( self : int ) -> Optional[Any]: __lowerCAmelCase = Trie() trie.add("""ABC""" ) trie.add("""B""" ) trie.add("""CD""" ) self.assertEqual(trie.split("""ABCD""" ) , ["""ABC""", """D"""] ) def a ( self : str ) -> Dict: # Even if the offsets are wrong, we necessarily 
output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text("""ABC""" , [0, 0, 2, 1, 2, 3] )
        self.assertEqual(parts , ["""AB""", """C"""] )
229
import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class snake_case__ : """simple docstring""" def __init__( self : Tuple, _snake_case : Any, _snake_case : int=1_3, _snake_case : Optional[int]=3_2, _snake_case : Tuple=2, _snake_case : Any=3, _snake_case : Tuple=1_6, _snake_case : Tuple=[1, 2, 1], _snake_case : Dict=[2, 2, 4], _snake_case : str=2, _snake_case : Union[str, Any]=2.0, _snake_case : Dict=True, _snake_case : Dict=0.0, _snake_case : str=0.0, _snake_case : str=0.1, _snake_case : List[str]="gelu", _snake_case : int=False, _snake_case : Optional[Any]=True, _snake_case : List[Any]=0.0_2, _snake_case : Union[str, Any]=1e-5, _snake_case : Union[str, Any]=True, _snake_case : List[Any]=None, _snake_case : Any=True, _snake_case : List[Any]=1_0, _snake_case : str=8, ) ->Union[str, Any]: snake_case__ : Any = parent snake_case__ : Tuple = batch_size snake_case__ : Tuple = image_size snake_case__ : Any = patch_size snake_case__ : Optional[int] = num_channels snake_case__ : Tuple = embed_dim snake_case__ : Any = depths snake_case__ : Any = num_heads snake_case__ : List[str] = window_size snake_case__ : Dict = mlp_ratio snake_case__ : Optional[int] = qkv_bias snake_case__ : Optional[Any] = hidden_dropout_prob snake_case__ : List[str] = attention_probs_dropout_prob snake_case__ : Union[str, Any] = drop_path_rate snake_case__ : str = hidden_act snake_case__ : Union[str, Any] = use_absolute_embeddings snake_case__ : Union[str, Any] = patch_norm snake_case__ : Any = layer_norm_eps snake_case__ : Tuple = initializer_range snake_case__ : Dict = is_training snake_case__ : Any = scope snake_case__ : Optional[Any] = use_labels snake_case__ : str = type_sequence_label_size snake_case__ : List[Any] = encoder_stride def lowercase_ ( self : Tuple ) ->str: snake_case__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case__ : List[Any] = None if self.use_labels: snake_case__ : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size ) snake_case__ : Any = self.get_config() return config, pixel_values, labels def lowercase_ ( self : Optional[int] ) ->Optional[int]: return SwinvaConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, ) def 
lowercase_ ( self : Optional[int], _snake_case : str, _snake_case : List[str], _snake_case : int ) ->Dict: snake_case__ : List[Any] = SwinvaModel(config=_snake_case ) model.to(_snake_case ) model.eval() snake_case__ : Optional[int] = model(_snake_case ) snake_case__ : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) snake_case__ : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim) ) def lowercase_ ( self : Optional[Any], _snake_case : Any, _snake_case : List[str], _snake_case : Dict ) ->List[Any]: snake_case__ : List[str] = SwinvaForMaskedImageModeling(config=_snake_case ) model.to(_snake_case ) model.eval() snake_case__ : Union[str, Any] = model(_snake_case ) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images snake_case__ : Optional[Any] = 1 snake_case__ : Optional[int] = SwinvaForMaskedImageModeling(_snake_case ) model.to(_snake_case ) model.eval() snake_case__ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case__ : Any = model(_snake_case ) self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size) ) def lowercase_ ( self : List[str], _snake_case : int, _snake_case : List[Any], _snake_case : Optional[int] ) ->Any: snake_case__ : Tuple = self.type_sequence_label_size snake_case__ : int = SwinvaForImageClassification(_snake_case ) model.to(_snake_case ) model.eval() snake_case__ : Tuple = model(_snake_case, labels=_snake_case ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) def lowercase_ ( self : Any ) ->Dict: snake_case__ : str = self.prepare_config_and_inputs() snake_case__ , snake_case__ , snake_case__ : List[str] = config_and_inputs snake_case__ : Union[str, Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class snake_case__ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) _SCREAMING_SNAKE_CASE = ( {"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification} if is_torch_available() else {} ) _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False def lowercase_ ( self : Union[str, Any] ) ->Dict: snake_case__ : Optional[int] = SwinvaModelTester(self ) snake_case__ : int = ConfigTester(self, config_class=_snake_case, embed_dim=3_7 ) def lowercase_ ( self : Tuple ) ->int: self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowercase_ ( self : Any ) ->str: snake_case__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) @unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' 
) def lowercase_ ( self : Any ) ->Union[str, Any]: pass @unittest.skip(reason='Swinv2 does not use inputs_embeds' ) def lowercase_ ( self : str ) ->Union[str, Any]: pass def lowercase_ ( self : Optional[Any] ) ->Union[str, Any]: snake_case__ , snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : Union[str, Any] = model_class(_snake_case ) self.assertIsInstance(model.get_input_embeddings(), (nn.Module) ) snake_case__ : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_snake_case, nn.Linear ) ) def lowercase_ ( self : List[str] ) ->Optional[int]: snake_case__ , snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : Any = model_class(_snake_case ) snake_case__ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case__ : Optional[Any] = [*signature.parameters.keys()] snake_case__ : List[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1], _snake_case ) def lowercase_ ( self : str ) ->Union[str, Any]: snake_case__ , snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ : int = True for model_class in self.all_model_classes: snake_case__ : str = True snake_case__ : Union[str, Any] = False snake_case__ : Tuple = True snake_case__ : int = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): snake_case__ : Optional[int] = model(**self._prepare_for_class(_snake_case, _snake_case ) ) snake_case__ : List[str] = outputs.attentions snake_case__ : List[Any] = len(self.model_tester.depths ) self.assertEqual(len(_snake_case ), _snake_case ) # check that output_attentions also work using config del inputs_dict["output_attentions"] snake_case__ : str = True snake_case__ : Tuple = config.window_size**2 snake_case__ : Optional[int] = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): snake_case__ : str = model(**self._prepare_for_class(_snake_case, _snake_case ) ) snake_case__ : Tuple = outputs.attentions self.assertEqual(len(_snake_case ), _snake_case ) self.assertListEqual( list(attentions[0].shape[-3:] ), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], ) snake_case__ : Optional[Any] = len(_snake_case ) # Check attention is always last and order is fine snake_case__ : Optional[int] = True snake_case__ : Dict = True snake_case__ : List[Any] = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): snake_case__ : Optional[int] = model(**self._prepare_for_class(_snake_case, _snake_case ) ) if hasattr(self.model_tester, 'num_hidden_states_types' ): snake_case__ : str = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states snake_case__ : Dict = 2 self.assertEqual(out_len + added_hidden_states, len(_snake_case ) ) snake_case__ : Any = outputs.attentions self.assertEqual(len(_snake_case ), _snake_case ) self.assertListEqual( list(self_attentions[0].shape[-3:] ), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], ) def lowercase_ ( self : Dict, _snake_case : Tuple, _snake_case : Any, _snake_case : int, _snake_case : Optional[int] ) ->str: snake_case__ : Dict = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): snake_case__ : List[Any] = 
model(**self._prepare_for_class(_snake_case, _snake_case ) ) snake_case__ : Dict = outputs.hidden_states snake_case__ : int = getattr( self.model_tester, 'expected_num_hidden_layers', len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_snake_case ), _snake_case ) # Swinv2 has a different seq_length snake_case__ : int = ( config.patch_size if isinstance(config.patch_size, collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case__ : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ), [num_patches, self.model_tester.embed_dim], ) snake_case__ : Union[str, Any] = outputs.reshaped_hidden_states self.assertEqual(len(_snake_case ), _snake_case ) snake_case__ , snake_case__ , snake_case__ , snake_case__ : str = reshaped_hidden_states[0].shape snake_case__ : Any = ( reshaped_hidden_states[0].view(_snake_case, _snake_case, height * width ).permute(0, 2, 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ), [num_patches, self.model_tester.embed_dim], ) def lowercase_ ( self : str ) ->List[Any]: snake_case__ , snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ : List[Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: snake_case__ : Optional[int] = True self.check_hidden_states_output(_snake_case, _snake_case, _snake_case, _snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case__ : Dict = True self.check_hidden_states_output(_snake_case, _snake_case, _snake_case, _snake_case ) def lowercase_ ( self : List[str] ) ->str: snake_case__ , snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ : List[str] = 3 snake_case__ : Union[str, Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) snake_case__ : str = ( config.patch_size if isinstance(config.patch_size, collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case__ : Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) snake_case__ : Optional[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: snake_case__ : int = True self.check_hidden_states_output(_snake_case, _snake_case, _snake_case, (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case__ : List[str] = True self.check_hidden_states_output(_snake_case, _snake_case, _snake_case, (padded_height, padded_width) ) def lowercase_ ( self : List[str] ) ->Optional[int]: snake_case__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_snake_case ) def lowercase_ ( self : List[Any] ) ->str: snake_case__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_snake_case ) @slow def lowercase_ ( self : str ) ->Union[str, Any]: for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case__ : Dict = SwinvaModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) def lowercase_ ( self : Optional[int] ) 
->List[str]: snake_case__ , snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ : List[Any] = _config_zero_init(_snake_case ) for model_class in self.all_model_classes: snake_case__ : List[str] = model_class(config=_snake_case ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', ) @require_vision @require_torch class snake_case__ ( unittest.TestCase ): """simple docstring""" @cached_property def lowercase_ ( self : Union[str, Any] ) ->List[str]: return ( AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ) if is_vision_available() else None ) @slow def lowercase_ ( self : int ) ->List[Any]: snake_case__ : Any = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to( _snake_case ) snake_case__ : int = self.default_image_processor snake_case__ : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) snake_case__ : Optional[Any] = image_processor(images=_snake_case, return_tensors='pt' ).to(_snake_case ) # forward pass with torch.no_grad(): snake_case__ : List[str] = model(**_snake_case ) # verify the logits snake_case__ : int = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape, _snake_case ) snake_case__ : Optional[int] = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(_snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3], _snake_case, atol=1e-4 ) )
277
0
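A hedged inference sketch mirroring the slow integration test in the Swin V2 record above; the checkpoint name and fixture path come straight from that test. Note that in the released transformers API the classes are spelled Swinv2ForImageClassification and AutoImageProcessor; the "Swinva" spelling above is an artifact of this dump's identifier mangling:

import torch
from PIL import Image
from transformers import AutoImageProcessor, Swinv2ForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")  # path from the test
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000), as asserted in the test
print(model.config.id2label[logits.argmax(-1).item()])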
"""simple docstring""" import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCAmelCase = 16 UpperCAmelCase = 32 def lowercase ( a__ : Accelerator , a__ : DatasetDict , a__ : List[int] , a__ : List[int] , a__ : int = 16 ) -> Dict: _UpperCamelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' ) _UpperCamelCase = DatasetDict( { '''train''': dataset['''train'''].select(a__ ), '''validation''': dataset['''train'''].select(a__ ), '''test''': dataset['''validation'''], } ) def tokenize_function(a__ : int ): # max_length=None => use the model max length (it's actually the default) _UpperCamelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=a__ , max_length=a__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _UpperCamelCase = datasets.map( a__ , batched=a__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _UpperCamelCase = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(a__ : Dict ): # On TPU it's best to pad everything to the same length or training will be very slow. _UpperCamelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _UpperCamelCase = 16 elif accelerator.mixed_precision != "no": _UpperCamelCase = 8 else: _UpperCamelCase = None return tokenizer.pad( a__ , padding='''longest''' , max_length=a__ , pad_to_multiple_of=a__ , return_tensors='''pt''' , ) # Instantiate dataloaders. 
_UpperCamelCase = DataLoader( tokenized_datasets['''train'''] , shuffle=a__ , collate_fn=a__ , batch_size=a__ ) _UpperCamelCase = DataLoader( tokenized_datasets['''validation'''] , shuffle=a__ , collate_fn=a__ , batch_size=a__ ) _UpperCamelCase = DataLoader( tokenized_datasets['''test'''] , shuffle=a__ , collate_fn=a__ , batch_size=a__ ) return train_dataloader, eval_dataloader, test_dataloader def lowercase ( a__ : int , a__ : str ) -> Any: # New Code # _UpperCamelCase = [] # Download the dataset _UpperCamelCase = load_dataset('''glue''' , '''mrpc''' ) # Create our splits _UpperCamelCase = StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator _UpperCamelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _UpperCamelCase = config['lr'] _UpperCamelCase = int(config['''num_epochs'''] ) _UpperCamelCase = int(config['''seed'''] ) _UpperCamelCase = int(config['''batch_size'''] ) _UpperCamelCase = evaluate.load('''glue''' , '''mrpc''' ) # If the batch size is too big we use gradient accumulation _UpperCamelCase = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: _UpperCamelCase = batch_size // MAX_GPU_BATCH_SIZE _UpperCamelCase = MAX_GPU_BATCH_SIZE set_seed(a__ ) # New Code # # Create our folds: _UpperCamelCase = kfold.split(np.zeros(datasets['''train'''].num_rows ) , datasets['''train''']['''label'''] ) _UpperCamelCase = [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(a__ ): _UpperCamelCase = get_fold_dataloaders( a__ , a__ , a__ , a__ , ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _UpperCamelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=a__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _UpperCamelCase = model.to(accelerator.device ) # Instantiate optimizer _UpperCamelCase = AdamW(params=model.parameters() , lr=a__ ) # Instantiate scheduler _UpperCamelCase = get_linear_schedule_with_warmup( optimizer=a__ , num_warmup_steps=100 , num_training_steps=(len(a__ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _UpperCamelCase = accelerator.prepare( a__ , a__ , a__ , a__ , a__ ) # Now we train the model for epoch in range(a__ ): model.train() for step, batch in enumerate(a__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _UpperCamelCase = model(**a__ ) _UpperCamelCase = outputs.loss _UpperCamelCase = loss / gradient_accumulation_steps accelerator.backward(a__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(a__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): _UpperCamelCase = model(**a__ ) _UpperCamelCase = outputs.logits.argmax(dim=-1 ) _UpperCamelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=a__ , references=a__ , ) _UpperCamelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , a__ ) # New Code # # We also run predictions on the test set at the very end _UpperCamelCase = [] for step, batch in enumerate(a__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _UpperCamelCase = model(**a__ ) _UpperCamelCase = outputs.logits _UpperCamelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. test_predictions.append(torch.cat(a__ , dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: _UpperCamelCase = torch.cat(a__ , dim=0 ) _UpperCamelCase = torch.stack(a__ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) _UpperCamelCase = metric.compute(predictions=a__ , references=a__ ) accelerator.print('''Average test metrics from all folds:''' , a__ ) def lowercase ( ) -> str: _UpperCamelCase = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=a__ , default=a__ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) # New Code # parser.add_argument('''--num_folds''' , type=a__ , default=3 , help='''The number of splits to perform across the dataset''' ) _UpperCamelCase = parser.parse_args() _UpperCamelCase = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16} training_function(a__ , a__ ) if __name__ == "__main__": main()
256
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class snake_case__ ( unittest.TestCase ): """simple docstring""" def __init__( self : Optional[int], _snake_case : List[Any], _snake_case : str=7, _snake_case : Tuple=3, _snake_case : List[str]=3_0, _snake_case : Tuple=4_0_0, _snake_case : Any=True, _snake_case : List[Any]=None, _snake_case : int=0.9, _snake_case : Optional[Any]=None, _snake_case : str=True, _snake_case : Union[str, Any]=[0.5, 0.5, 0.5], _snake_case : Union[str, Any]=[0.5, 0.5, 0.5], ) ->List[Any]: snake_case__ : int = size if size is not None else {'shortest_edge': 3_0} snake_case__ : Tuple = crop_size if crop_size is not None else {'height': 3_0, 'width': 3_0} snake_case__ : Union[str, Any] = parent snake_case__ : Dict = batch_size snake_case__ : int = num_channels snake_case__ : Tuple = min_resolution snake_case__ : Any = max_resolution snake_case__ : List[Any] = do_resize_and_center_crop snake_case__ : str = size snake_case__ : str = crop_pct snake_case__ : List[str] = crop_size snake_case__ : Optional[int] = do_normalize snake_case__ : Tuple = image_mean snake_case__ : Tuple = image_std def lowercase_ ( self : Optional[int] ) ->int: return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class snake_case__ ( lowerCAmelCase_ , unittest.TestCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = PoolFormerImageProcessor if is_vision_available() else None def lowercase_ ( self : Union[str, Any] ) ->Dict: snake_case__ : Union[str, Any] = PoolFormerImageProcessingTester(self ) @property def lowercase_ ( self : int ) ->Dict: return self.image_processor_tester.prepare_image_processor_dict() def lowercase_ ( self : Union[str, Any] ) ->Optional[int]: snake_case__ : List[str] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_snake_case, 'do_resize_and_center_crop' ) ) self.assertTrue(hasattr(_snake_case, 'size' ) ) self.assertTrue(hasattr(_snake_case, 'crop_pct' ) ) self.assertTrue(hasattr(_snake_case, 'do_normalize' ) ) self.assertTrue(hasattr(_snake_case, 'image_mean' ) ) self.assertTrue(hasattr(_snake_case, 'image_std' ) ) def lowercase_ ( self : List[str] ) ->List[str]: snake_case__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {'shortest_edge': 3_0} ) self.assertEqual(image_processor.crop_size, {'height': 3_0, 'width': 3_0} ) snake_case__ : int = self.image_processing_class.from_dict(self.image_processor_dict, size=4_2, crop_size=8_4 ) self.assertEqual(image_processor.size, {'shortest_edge': 4_2} ) self.assertEqual(image_processor.crop_size, {'height': 8_4, 'width': 8_4} ) def lowercase_ ( self : List[Any] ) ->List[Any]: pass def lowercase_ ( self : List[str] ) ->str: # Initialize image_processing snake_case__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester, 
equal_resolution=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case, Image.Image ) # Test not batched input snake_case__ : Optional[int] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), ) # Test batched snake_case__ : str = image_processing(_snake_case, return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), ) def lowercase_ ( self : int ) ->List[Any]: # Initialize image_processing snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case__ : Dict = prepare_image_inputs(self.image_processor_tester, equal_resolution=_snake_case, numpify=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case, np.ndarray ) # Test not batched input snake_case__ : Dict = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), ) # Test batched snake_case__ : List[Any] = image_processing(_snake_case, return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), ) def lowercase_ ( self : List[str] ) ->List[str]: # Initialize image_processing snake_case__ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester, equal_resolution=_snake_case, torchify=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case, torch.Tensor ) # Test not batched input snake_case__ : Tuple = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), ) # Test batched snake_case__ : Optional[Any] = image_processing(_snake_case, return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), )
277
0
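A minimal sketch of the fold-ensembling step used at the end of the cross-validation script above: per-fold test logits are stacked, summed, divided by the fold count, and argmax'd (soft voting). The fold count and tensor shapes here are illustrative assumptions:

import torch

num_folds = 3
fold_logits = [torch.randn(8, 2) for _ in range(num_folds)]  # (examples, classes) per fold
ensembled = torch.stack(fold_logits, dim=0).sum(dim=0).div(num_folds)  # average the fold logits
predictions = ensembled.argmax(dim=-1)  # final soft-voted class per example
print(predictions)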
from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class A_ ( lowerCAmelCase_ ): _lowercase : int = ['vqvae'] def __init__( self : Any , UpperCAmelCase : AutoencoderKL , UpperCAmelCase : UNetaDConditionModel , UpperCAmelCase : Mel , UpperCAmelCase : Union[DDIMScheduler, DDPMScheduler] , ) -> Dict: super().__init__() self.register_modules(unet=_snake_case , scheduler=_snake_case , mel=_snake_case , vqvae=_snake_case ) def UpperCAmelCase ( self : List[str] ) -> int: return 5_0 if isinstance(self.scheduler , _snake_case ) else 1_0_0_0 @torch.no_grad() def __call__( self : Dict , UpperCAmelCase : int = 1 , UpperCAmelCase : str = None , UpperCAmelCase : np.ndarray = None , UpperCAmelCase : int = 0 , UpperCAmelCase : int = 0 , UpperCAmelCase : int = None , UpperCAmelCase : torch.Generator = None , UpperCAmelCase : float = 0 , UpperCAmelCase : float = 0 , UpperCAmelCase : torch.Generator = None , UpperCAmelCase : float = 0 , UpperCAmelCase : torch.Tensor = None , UpperCAmelCase : torch.Tensor = None , UpperCAmelCase : Optional[Any]=True , ) -> Union[ Union[AudioPipelineOutput, ImagePipelineOutput], Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]], ]: __lowerCAmelCase: Any = steps or self.get_default_steps() self.scheduler.set_timesteps(_snake_case ) __lowerCAmelCase: Any = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: __lowerCAmelCase: Optional[Any] = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: __lowerCAmelCase: str = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=_snake_case , device=self.device , ) __lowerCAmelCase: Optional[int] = noise __lowerCAmelCase: str = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(_snake_case , _snake_case ) __lowerCAmelCase: Tuple = self.mel.audio_slice_to_image(_snake_case ) __lowerCAmelCase: str = np.frombuffer(input_image.tobytes() , dtype='uint8' ).reshape( (input_image.height, input_image.width) ) __lowerCAmelCase: Union[str, Any] = (input_image / 2_5_5) * 2 - 1 __lowerCAmelCase: Optional[Any] = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: __lowerCAmelCase: Any = self.vqvae.encode(torch.unsqueeze(_snake_case , 0 ) ).latent_dist.sample( generator=_snake_case )[0] __lowerCAmelCase: int = self.vqvae.config.scaling_factor * input_images if start_step > 0: __lowerCAmelCase: int = self.scheduler.add_noise(_snake_case , _snake_case , self.scheduler.timesteps[start_step - 1] ) __lowerCAmelCase: Dict = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) __lowerCAmelCase: List[Any] = int(mask_start_secs * pixels_per_second ) __lowerCAmelCase: int = int(mask_end_secs * pixels_per_second ) __lowerCAmelCase: str = self.scheduler.add_noise(_snake_case , _snake_case , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet , _snake_case ): __lowerCAmelCase: List[str] = self.unet(_snake_case , _snake_case , 
_snake_case )['sample'] else: __lowerCAmelCase: List[Any] = self.unet(_snake_case , _snake_case )['sample'] if isinstance(self.scheduler , _snake_case ): __lowerCAmelCase: Any = self.scheduler.step( model_output=_snake_case , timestep=_snake_case , sample=_snake_case , eta=_snake_case , generator=_snake_case , )['prev_sample'] else: __lowerCAmelCase: List[str] = self.scheduler.step( model_output=_snake_case , timestep=_snake_case , sample=_snake_case , generator=_snake_case , )['prev_sample'] if mask is not None: if mask_start > 0: __lowerCAmelCase: Optional[int] = mask[:, step, :, :mask_start] if mask_end > 0: __lowerCAmelCase: Optional[Any] = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance __lowerCAmelCase: List[Any] = 1 / self.vqvae.config.scaling_factor * images __lowerCAmelCase: int = self.vqvae.decode(_snake_case )['sample'] __lowerCAmelCase: Dict = (images / 2 + 0.5).clamp(0 , 1 ) __lowerCAmelCase: List[Any] = images.cpu().permute(0 , 2 , 3 , 1 ).numpy() __lowerCAmelCase: Any = (images * 2_5_5).round().astype('uint8' ) __lowerCAmelCase: int = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(_snake_case , mode='RGB' ).convert('L' ) for _ in images) ) __lowerCAmelCase: Any = [self.mel.image_to_audio(_snake_case ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(_snake_case )[:, np.newaxis, :] ) , **ImagePipelineOutput(_snake_case ) ) @torch.no_grad() def UpperCAmelCase ( self : List[Any] , UpperCAmelCase : List[Image.Image] , UpperCAmelCase : int = 5_0 ) -> np.ndarray: assert isinstance(self.scheduler , _snake_case ) self.scheduler.set_timesteps(_snake_case ) __lowerCAmelCase: Optional[Any] = np.array( [np.frombuffer(image.tobytes() , dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] ) __lowerCAmelCase: Tuple = (sample / 2_5_5) * 2 - 1 __lowerCAmelCase: Tuple = torch.Tensor(_snake_case ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): __lowerCAmelCase: Tuple = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps __lowerCAmelCase: Optional[Any] = self.scheduler.alphas_cumprod[t] __lowerCAmelCase: Dict = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) __lowerCAmelCase: int = 1 - alpha_prod_t __lowerCAmelCase: Any = self.unet(_snake_case , _snake_case )['sample'] __lowerCAmelCase: List[Any] = (1 - alpha_prod_t_prev) ** 0.5 * model_output __lowerCAmelCase: Dict = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) __lowerCAmelCase: List[str] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def UpperCAmelCase ( UpperCAmelCase : torch.Tensor , UpperCAmelCase : torch.Tensor , UpperCAmelCase : float ) -> torch.Tensor: __lowerCAmelCase: str = acos(torch.dot(torch.flatten(_snake_case ) , torch.flatten(_snake_case ) ) / torch.norm(_snake_case ) / torch.norm(_snake_case ) ) return sin((1 - alpha) * theta ) * xa / sin(_snake_case ) + sin(alpha * theta ) * xa / sin(_snake_case )
322
from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
277
0
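The static method at the end of the audio diffusion pipeline in this record implements spherical linear interpolation between two noise tensors; both operands appear as `xa` in this dump's mangling, so x_0 and x_1 below restore the intended operands. Written out:

\[
\theta = \arccos\!\left(\frac{\langle x_0, x_1\rangle}{\lVert x_0\rVert\,\lVert x_1\rVert}\right),
\qquad
\mathrm{slerp}(x_0, x_1, \alpha)
= \frac{\sin\bigl((1-\alpha)\theta\bigr)}{\sin\theta}\,x_0
+ \frac{\sin(\alpha\theta)}{\sin\theta}\,x_1
\]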
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3

    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
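The function above evaluates the Friedmann relation for the Hubble parameter at redshift z; in the code's names, `curvature` is the curvature density Omega_k and `e_2` is E(z)^2. Written out:

\[
H(z) = H_0\,\sqrt{\Omega_r (1+z)^4 + \Omega_m (1+z)^3 + \Omega_k (1+z)^2 + \Omega_\Lambda},
\qquad
\Omega_k = 1 - (\Omega_m + \Omega_r + \Omega_\Lambda)
\]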
95
def mf_knapsack(i, wt, val, j):
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]


def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp


def knapsack_with_example_solution(w: int, wt: list, val: list):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            F"""But got {num_items} weights and {len(val)} values"""
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                F"""type {type(wt[i])} at index {i}"""
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
277
0
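The bottom-up table built by `knapsack()` in the record above fills the standard 0/1 knapsack recurrence, with dp[0][.] = 0 and the optimal value read off at dp[n][w]:

\[
dp[i][w] =
\begin{cases}
dp[i-1][w] & \text{if } wt_{i-1} > w\\[2pt]
\max\bigl(dp[i-1][w],\; val_{i-1} + dp[i-1][w - wt_{i-1}]\bigr) & \text{otherwise}
\end{cases}
\]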
import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) __a = { "sample_size": 3_2, "in_channels": 3, "out_channels": 3, "layers_per_block": 2, "num_class_embeds": 1_0_0_0, "block_out_channels": [3_2, 6_4], "attention_head_dim": 8, "down_block_types": [ "ResnetDownsampleBlock2D", "AttnDownBlock2D", ], "up_block_types": [ "AttnUpBlock2D", "ResnetUpsampleBlock2D", ], "resnet_time_scale_shift": "scale_shift", "upsample_type": "resnet", "downsample_type": "resnet", } __a = { "sample_size": 6_4, "in_channels": 3, "out_channels": 3, "layers_per_block": 3, "num_class_embeds": 1_0_0_0, "block_out_channels": [1_9_2, 1_9_2 * 2, 1_9_2 * 3, 1_9_2 * 4], "attention_head_dim": 6_4, "down_block_types": [ "ResnetDownsampleBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", ], "up_block_types": [ "AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "ResnetUpsampleBlock2D", ], "resnet_time_scale_shift": "scale_shift", "upsample_type": "resnet", "downsample_type": "resnet", } __a = { "sample_size": 2_5_6, "in_channels": 3, "out_channels": 3, "layers_per_block": 2, "num_class_embeds": None, "block_out_channels": [2_5_6, 2_5_6, 2_5_6 * 2, 2_5_6 * 2, 2_5_6 * 4, 2_5_6 * 4], "attention_head_dim": 6_4, "down_block_types": [ "ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", ], "up_block_types": [ "AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D", ], "resnet_time_scale_shift": "default", "upsample_type": "resnet", "downsample_type": "resnet", } __a = { "num_train_timesteps": 4_0, "sigma_min": 0.002, "sigma_max": 80.0, } __a = { "num_train_timesteps": 2_0_1, "sigma_min": 0.002, "sigma_max": 80.0, } __a = { "num_train_timesteps": 1_5_1, "sigma_min": 0.002, "sigma_max": 80.0, } def a ( snake_case__: Optional[int] ): '''simple docstring''' if isinstance(snake_case__ , snake_case__ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError('''boolean value expected''' ) def a ( snake_case__: Union[str, Any] , snake_case__: List[str] , snake_case__: str , snake_case__: Union[str, Any] , snake_case__: Any=False ): '''simple docstring''' lowercase_ = checkpoint[F'''{old_prefix}.in_layers.0.weight'''] lowercase_ = checkpoint[F'''{old_prefix}.in_layers.0.bias'''] lowercase_ = checkpoint[F'''{old_prefix}.in_layers.2.weight'''] lowercase_ = checkpoint[F'''{old_prefix}.in_layers.2.bias'''] lowercase_ = checkpoint[F'''{old_prefix}.emb_layers.1.weight'''] lowercase_ = checkpoint[F'''{old_prefix}.emb_layers.1.bias'''] lowercase_ = checkpoint[F'''{old_prefix}.out_layers.0.weight'''] lowercase_ = checkpoint[F'''{old_prefix}.out_layers.0.bias'''] lowercase_ = checkpoint[F'''{old_prefix}.out_layers.3.weight'''] lowercase_ = checkpoint[F'''{old_prefix}.out_layers.3.bias'''] if has_skip: lowercase_ = checkpoint[F'''{old_prefix}.skip_connection.weight'''] lowercase_ = checkpoint[F'''{old_prefix}.skip_connection.bias'''] return new_checkpoint def a ( snake_case__: Union[str, Any] , snake_case__: List[str] , snake_case__: Optional[Any] , snake_case__: Optional[Any] , snake_case__: List[Any]=None ): '''simple docstring''' lowercase_ = checkpoint[F'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 ) lowercase_ = checkpoint[F'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 ) lowercase_ = 
checkpoint[F'''{old_prefix}.norm.weight'''] lowercase_ = checkpoint[F'''{old_prefix}.norm.bias'''] lowercase_ = weight_q.squeeze(-1 ).squeeze(-1 ) lowercase_ = bias_q.squeeze(-1 ).squeeze(-1 ) lowercase_ = weight_k.squeeze(-1 ).squeeze(-1 ) lowercase_ = bias_k.squeeze(-1 ).squeeze(-1 ) lowercase_ = weight_v.squeeze(-1 ).squeeze(-1 ) lowercase_ = bias_v.squeeze(-1 ).squeeze(-1 ) lowercase_ = ( checkpoint[F'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 ) ) lowercase_ = checkpoint[F'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def a ( snake_case__: str , snake_case__: str ): '''simple docstring''' lowercase_ = torch.load(snake_case__ , map_location='''cpu''' ) lowercase_ = {} lowercase_ = checkpoint['time_embed.0.weight'] lowercase_ = checkpoint['time_embed.0.bias'] lowercase_ = checkpoint['time_embed.2.weight'] lowercase_ = checkpoint['time_embed.2.bias'] if unet_config["num_class_embeds"] is not None: lowercase_ = checkpoint['label_emb.weight'] lowercase_ = checkpoint['input_blocks.0.0.weight'] lowercase_ = checkpoint['input_blocks.0.0.bias'] lowercase_ = unet_config['down_block_types'] lowercase_ = unet_config['layers_per_block'] lowercase_ = unet_config['attention_head_dim'] lowercase_ = unet_config['block_out_channels'] lowercase_ = 1 lowercase_ = channels_list[0] for i, layer_type in enumerate(snake_case__ ): lowercase_ = channels_list[i] lowercase_ = current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(snake_case__ ): lowercase_ = F'''down_blocks.{i}.resnets.{j}''' lowercase_ = F'''input_blocks.{current_layer}.0''' lowercase_ = True if j == 0 and downsample_block_has_skip else False lowercase_ = convert_resnet(snake_case__ , snake_case__ , snake_case__ , snake_case__ , has_skip=snake_case__ ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(snake_case__ ): lowercase_ = F'''down_blocks.{i}.resnets.{j}''' lowercase_ = F'''input_blocks.{current_layer}.0''' lowercase_ = True if j == 0 and downsample_block_has_skip else False lowercase_ = convert_resnet(snake_case__ , snake_case__ , snake_case__ , snake_case__ , has_skip=snake_case__ ) lowercase_ = F'''down_blocks.{i}.attentions.{j}''' lowercase_ = F'''input_blocks.{current_layer}.1''' lowercase_ = convert_attention( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) current_layer += 1 if i != len(snake_case__ ) - 1: lowercase_ = F'''down_blocks.{i}.downsamplers.0''' lowercase_ = F'''input_blocks.{current_layer}.0''' lowercase_ = convert_resnet(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) current_layer += 1 lowercase_ = current_channels # hardcoded the mid-block for now lowercase_ = 'mid_block.resnets.0' lowercase_ = 'middle_block.0' lowercase_ = convert_resnet(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) lowercase_ = 'mid_block.attentions.0' lowercase_ = 'middle_block.1' lowercase_ = convert_attention(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) lowercase_ = 'mid_block.resnets.1' lowercase_ = 'middle_block.2' lowercase_ = convert_resnet(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) lowercase_ = 0 lowercase_ = unet_config['up_block_types'] for i, layer_type in enumerate(snake_case__ ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): lowercase_ = F'''up_blocks.{i}.resnets.{j}''' lowercase_ = F'''output_blocks.{current_layer}.0''' lowercase_ = convert_resnet(snake_case__ , snake_case__ , snake_case__ , 
snake_case__ , has_skip=snake_case__ ) current_layer += 1 if i != len(snake_case__ ) - 1: lowercase_ = F'''up_blocks.{i}.upsamplers.0''' lowercase_ = F'''output_blocks.{current_layer-1}.1''' lowercase_ = convert_resnet(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): lowercase_ = F'''up_blocks.{i}.resnets.{j}''' lowercase_ = F'''output_blocks.{current_layer}.0''' lowercase_ = convert_resnet(snake_case__ , snake_case__ , snake_case__ , snake_case__ , has_skip=snake_case__ ) lowercase_ = F'''up_blocks.{i}.attentions.{j}''' lowercase_ = F'''output_blocks.{current_layer}.1''' lowercase_ = convert_attention( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) current_layer += 1 if i != len(snake_case__ ) - 1: lowercase_ = F'''up_blocks.{i}.upsamplers.0''' lowercase_ = F'''output_blocks.{current_layer-1}.2''' lowercase_ = convert_resnet(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) lowercase_ = checkpoint['out.0.weight'] lowercase_ = checkpoint['out.0.bias'] lowercase_ = checkpoint['out.2.weight'] lowercase_ = checkpoint['out.2.bias'] return new_checkpoint if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument('--unet_path', default=None, type=str, required=True, help='Path to the unet.pt to convert.') parser.add_argument( '--dump_path', default=None, type=str, required=True, help='Path to output the converted UNet model.' ) parser.add_argument('--class_cond', default=True, type=str, help='Whether the model is class-conditional.') __a = parser.parse_args() __a = strabool(args.class_cond) __a = os.path.basename(args.unet_path) print(f"Checkpoint: {ckpt_name}") # Get U-Net config if "imagenet64" in ckpt_name: __a = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): __a = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: __a = TEST_UNET_CONFIG else: raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.") if not args.class_cond: __a = None __a = con_pt_to_diffuser(args.unet_path, unet_config) __a = UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: __a = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: __a = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): __a = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.") __a = CMStochasticIterativeScheduler(**scheduler_config) __a = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
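# Usage sketch for the conversion script above; the script file name and checkpoint
# path are hypothetical. The checkpoint name must contain "imagenet64", "256" (plus
# "bedroom"/"cat"), or "test" so the config and scheduler lookups succeed, and "cd"
# vs. "ct" selects the consistency-distillation vs. consistency-training scheduler:
#
#   python convert_consistency_to_diffusers.py \
#       --unet_path ./cd_imagenet64_l2.pt \
#       --dump_path ./consistency-imagenet64 \
#       --class_cond True
#
# The saved pipeline can then be loaded and sampled from in a single step:
#
#   from diffusers import ConsistencyModelPipeline
#   pipe = ConsistencyModelPipeline.from_pretrained("./consistency-imagenet64")
#   image = pipe(num_inference_steps=1).images[0]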
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
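# What the lazy module above buys, as a minimal sketch: importing the package is
# cheap, and a submodule is only materialized when one of its attributes is first
# accessed through the top-level package:
#
#   from transformers import LongT5Config                     # resolves configuration_longt5 only
#   config = LongT5Config()                                   # torch/flax still not imported
#   from transformers import LongT5ForConditionalGeneration   # now modeling_longt5 (and torch) load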
"""simple docstring""" import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def lowercase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[Any] ) -> Optional[Any]: # Load configuration defined in the metadata file with open(lowerCAmelCase__ ) as metadata_file: __a = json.load(lowerCAmelCase__ ) __a = LukeConfig(use_entity_aware_attention=lowerCAmelCase__ , **metadata['''model_config'''] ) # Load in the weights from the checkpoint_path __a = torch.load(lowerCAmelCase__ , map_location='''cpu''' )['module'] # Load the entity vocab file __a = load_original_entity_vocab(lowerCAmelCase__ ) # add an entry for [MASK2] __a = max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 __a = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] ) # Add special tokens to the token vocabulary for downstream tasks __a = AddedToken('''<ent>''' , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) __a = AddedToken('''<ent2>''' , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(f'''Saving tokenizer to {pytorch_dump_folder_path}''' ) tokenizer.save_pretrained(lowerCAmelCase__ ) with open(os.path.join(lowerCAmelCase__ , '''tokenizer_config.json''' ) , '''r''' ) as f: __a = json.load(lowerCAmelCase__ ) __a = 'MLukeTokenizer' with open(os.path.join(lowerCAmelCase__ , '''tokenizer_config.json''' ) , '''w''' ) as f: json.dump(lowerCAmelCase__ , lowerCAmelCase__ ) with open(os.path.join(lowerCAmelCase__ , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f: json.dump(lowerCAmelCase__ , lowerCAmelCase__ ) __a = MLukeTokenizer.from_pretrained(lowerCAmelCase__ ) # Initialize the embeddings of the special tokens __a = tokenizer.convert_tokens_to_ids(['''@'''] )[0] __a = tokenizer.convert_tokens_to_ids(['''#'''] )[0] __a = state_dict['embeddings.word_embeddings.weight'] __a = word_emb[ent_init_index].unsqueeze(0 ) __a = word_emb[enta_init_index].unsqueeze(0 ) __a = torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: __a = state_dict[bias_name] __a = decoder_bias[ent_init_index].unsqueeze(0 ) __a = decoder_bias[enta_init_index].unsqueeze(0 ) __a = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: __a = f'''encoder.layer.{layer_index}.attention.self.''' __a = state_dict[prefix + matrix_name] __a = state_dict[prefix + matrix_name] __a = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks __a = state_dict['entity_embeddings.entity_embeddings.weight'] __a = entity_emb[entity_vocab['[MASK]']].unsqueeze(0 ) __a = torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' __a = state_dict['entity_predictions.bias'] __a = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0 ) __a = torch.cat([entity_prediction_bias, 
entity_mask_bias] ) __a = LukeForMaskedLM(config=lowerCAmelCase__ ).eval() state_dict.pop('''entity_predictions.decoder.weight''' ) state_dict.pop('''lm_head.decoder.weight''' ) state_dict.pop('''lm_head.decoder.bias''' ) __a = OrderedDict() for key, value in state_dict.items(): if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )): __a = state_dict[key] else: __a = state_dict[key] __a = model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ ) if set(lowerCAmelCase__ ) != {"luke.embeddings.position_ids"}: raise ValueError(f'''Unexpected unexpected_keys: {unexpected_keys}''' ) if set(lowerCAmelCase__ ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(f'''Unexpected missing_keys: {missing_keys}''' ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs __a = MLukeTokenizer.from_pretrained(lowerCAmelCase__ , task='''entity_classification''' ) __a = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).' __a = (0, 9) __a = tokenizer(lowerCAmelCase__ , entity_spans=[span] , return_tensors='''pt''' ) __a = model(**lowerCAmelCase__ ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base __a = torch.Size((1, 33, 768) ) __a = torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1e-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base __a = torch.Size((1, 1, 768) ) __a = torch.tensor([[-0.14_82, 0.06_09, 0.03_22]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is''' f''' {expected_shape}''' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1e-4 ): raise ValueError # Verify masked word/entity prediction __a = MLukeTokenizer.from_pretrained(lowerCAmelCase__ ) __a = 'Tokyo is the capital of <mask>.' 
__a = (24, 30) __a = tokenizer(lowerCAmelCase__ , entity_spans=[span] , return_tensors='''pt''' ) __a = model(**lowerCAmelCase__ ) __a = encoding['input_ids'][0].tolist() __a = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) ) __a = outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(lowerCAmelCase__ ) __a = outputs.entity_logits[0][0].argmax().item() __a = [ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print('''Saving PyTorch model to {}'''.format(lowerCAmelCase__ ) ) model.save_pretrained(lowerCAmelCase__ ) def lowercase ( lowerCAmelCase__ : List[str] ) -> Tuple: __a = ['[MASK]', '[PAD]', '[UNK]'] __a = [json.loads(lowerCAmelCase__ ) for line in open(lowerCAmelCase__ )] __a = {} for entry in data: __a = entry['id'] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: __a = entity_id break __a = f'''{language}:{entity_name}''' __a = entity_id return new_mapping if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.") parser.add_argument( "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration." ) parser.add_argument( "--entity_vocab_path", default=None, type=str, help="Path to an entity_vocab.tsv file, containing the entity vocabulary.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model." ) parser.add_argument( "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted." ) lowercase_ = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
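# Example invocation of the converter above (script name and all paths are
# hypothetical). Note that load_original_entity_vocab is meant to parse one JSON
# object per line, so the entity vocab file is JSON Lines despite the ".tsv"
# mention in the argument help text:
#
#   python convert_mluke_checkpoint.py \
#       --checkpoint_path ./mluke/pytorch_model.bin \
#       --metadata_path ./mluke/metadata.json \
#       --entity_vocab_path ./mluke/entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./mluke-base-converted \
#       --model_size base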
import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def lowercase_ (A : List[str] ): snake_case__ : Tuple = [ 'encoder.version', 'decoder.version', 'model.encoder.version', 'model.decoder.version', 'decoder.output_projection.weight', '_float_tensor', 'encoder.embed_positions._float_tensor', 'decoder.embed_positions._float_tensor', ] for k in ignore_keys: state_dict.pop(A , A ) def lowercase_ (A : str ): snake_case__ , snake_case__ : Union[str, Any] = emb.weight.shape snake_case__ : str = nn.Linear(A , A , bias=A ) snake_case__ : str = emb.weight.data return lin_layer def lowercase_ (A : Optional[int] , A : Union[str, Any]=None ): snake_case__ : Any = {} for old_key in state_dict.keys(): snake_case__ : Tuple = old_key if "moe_layer.experts." in key: if expert_idx is not None: snake_case__ : int = key.replace('moe_layer.experts.0' , F'''ffn.experts.expert_{expert_idx}''' ) else: snake_case__ : Any = key.replace('moe_layer.experts.' , 'ffn.experts.expert_' ) if "gate" in key: snake_case__ : Dict = key.replace('.moe_layer.gate.wg' , '.ffn.router.classifier' ) if "fc2" and "experts" not in key: snake_case__ : str = key.replace('.fc2.' , '.ffn.fc2.' ) if "fc1" and "experts" not in key: snake_case__ : str = key.replace('.fc1.' , '.ffn.fc1.' ) if ".encoder_attn." in key: snake_case__ : Tuple = key.replace('.encoder_attn.' , '.cross_attention.' ) if "encoder_attn_layer_norm" in key: snake_case__ : Tuple = key.replace('encoder_attn_layer_norm' , 'cross_attention_layer_norm' ) if "final_layer_norm" in key: snake_case__ : Optional[int] = key.replace('final_layer_norm' , 'ff_layer_norm' ) snake_case__ : Dict = state_dict[old_key] return new_dict def lowercase_ (A : List[Any] , A : Tuple , A : List[Any] , A : List[str] , A : str = WEIGHTS_NAME ): snake_case__ : Dict = [] snake_case__ : str = 0 os.makedirs(A , exist_ok=A ) for expert in range(A ): snake_case__ : Tuple = switch_checkpoint_path + F'''-rank-{expert}.pt''' if os.path.isfile(A ): snake_case__ : Optional[Any] = torch.load(A )['model'] remove_ignore_keys_(A ) snake_case__ : Optional[Any] = rename_fairseq_keys(A , A ) snake_case__ : Dict = os.path.join( A , weights_name.replace('.bin' , F'''-{len(A )+1:05d}-of-???.bin''' ) ) torch.save(A , A ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(A )[0]].dtype ) # Add the last block snake_case__ : Tuple = os.path.join(A , weights_name.replace('.bin' , F'''-{len(A )+1:05d}-of-???.bin''' ) ) snake_case__ : Union[str, Any] = torch.load(switch_checkpoint_path + '-shared.pt' )['model'] remove_ignore_keys_(A ) snake_case__ : str = rename_fairseq_keys(A , A ) snake_case__ : Any = shared_weights['decoder.embed_tokens.weight'] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if len(A ) == 1: snake_case__ : Any = os.path.join(A , A ) torch.save(A , A ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(A , A ) # Otherwise, let's build the index snake_case__ : Tuple = {} for idx, shard in enumerate(A ): snake_case__ : Optional[int] = weights_name.replace('.bin' , F'''-{idx+1:05d}-of-{len(A ):05d}.bin''' ) snake_case__ : List[Any] = os.path.join(A , weights_name.replace('.bin' , F'''-{idx+1:05d}-of-???.bin''' ) ) 
os.rename(A , os.path.join(A , A ) ) for key in shard: snake_case__ : Any = shard_file # Add the metadata snake_case__ : int = {'total_size': total_size} snake_case__ : Dict = {'metadata': metadata, 'weight_map': weight_map} with open(os.path.join(A , A ) , 'w' , encoding='utf-8' ) as f: snake_case__ : Any = json.dumps(A , indent=2 , sort_keys=A ) + '\n' f.write(A ) return metadata, index if __name__ == "__main__": a_ :int = argparse.ArgumentParser() # Required parameters parser.add_argument( "--nllb_moe_checkpoint_path", default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000", type=str, required=False, help="Path to a directory containing a folder per layer. Follows the original Google format.", ) parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model") parser.add_argument( "--pytorch_dump_folder_path", default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b", type=str, required=False, help="Path to the output pytorch model.", ) a_ :Optional[Any] = parser.parse_args() a_ , a_ :Optional[Any] = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 128, args.dtype, ) a_ :List[str] = NllbMoeConfig.from_pretrained( "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128 ) config.save_pretrained(args.pytorch_dump_folder_path) a_ :int = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print("Done") model.save_pretrained(args.pytorch_dump_folder_path)
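# Sketch of what shard_on_the_fly writes: one shard per expert rank plus a shared
# shard and, when there is more than one shard, an index mapping every parameter
# name to its shard file. Sizes and names below are illustrative only:
#
#   {
#     "metadata": {"total_size": 123456789},
#     "weight_map": {
#       "decoder.layers.3.ffn.experts.expert_0.fc1.weight": "pytorch_model-00001-of-00129.bin",
#       "shared.weight": "pytorch_model-00129-of-00129.bin"
#     }
#   }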
'''simple docstring''' import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel _lowerCAmelCase = logging.getLogger(__name__) def __lowerCAmelCase ( snake_case__ , snake_case__ ): # save results if os.path.exists(snake_case__ ): if os.path.exists(os.path.join(snake_case__ , "config.json" ) ) and os.path.isfile( os.path.join(snake_case__ , "config.json" ) ): os.remove(os.path.join(snake_case__ , "config.json" ) ) if os.path.exists(os.path.join(snake_case__ , "pytorch_model.bin" ) ) and os.path.isfile( os.path.join(snake_case__ , "pytorch_model.bin" ) ): os.remove(os.path.join(snake_case__ , "pytorch_model.bin" ) ) else: os.makedirs(snake_case__ ) model.save_pretrained(snake_case__ ) def __lowerCAmelCase ( snake_case__ , snake_case__=False ): __UpperCamelCase : str = 2 if unlogit: __UpperCamelCase : Dict = torch.pow(snake_case__ , snake_case__ ) __UpperCamelCase : Any = p * torch.log(snake_case__ ) __UpperCamelCase : Tuple = 0 return -plogp.sum(dim=-1 ) def __lowerCAmelCase ( snake_case__ ): logger.info("lv, h >\t" + "\t".join(F"{x + 1}" for x in range(len(snake_case__ ) ) ) ) for row in range(len(snake_case__ ) ): if tensor.dtype != torch.long: logger.info(F"layer {row + 1}:\t" + "\t".join(F"{x:.5f}" for x in tensor[row].cpu().data ) ) else: logger.info(F"layer {row + 1}:\t" + "\t".join(F"{x:d}" for x in tensor[row].cpu().data ) ) def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__=True , snake_case__=True , snake_case__=None , snake_case__=False ): __UpperCamelCase : Optional[Any] = model.config.num_hidden_layers, model.config.num_attention_heads __UpperCamelCase : int = torch.zeros(snake_case__ , snake_case__ ).to(args.device ) __UpperCamelCase : Any = torch.zeros(snake_case__ , snake_case__ ).to(args.device ) if head_mask is None: __UpperCamelCase : Dict = torch.ones(snake_case__ , snake_case__ ).to(args.device ) head_mask.requires_grad_(requires_grad=snake_case__ ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: __UpperCamelCase : Optional[int] = None __UpperCamelCase : List[Any] = 0.0 __UpperCamelCase : str = 0.0 for step, inputs in enumerate(tqdm(snake_case__ , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ): __UpperCamelCase : Union[str, Any] = tuple(t.to(args.device ) for t in inputs ) (__UpperCamelCase ) : Optional[Any] = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) __UpperCamelCase : Union[str, Any] = model(snake_case__ , labels=snake_case__ , head_mask=snake_case__ ) # (loss), lm_logits, presents, (all hidden_states), (attentions) __UpperCamelCase : Dict = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(snake_case__ ): __UpperCamelCase : Optional[Any] = entropy(attn.detach() , snake_case__ ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(snake_case__ ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization 
if not args.dont_normalize_importance_by_layer: __UpperCamelCase : Union[str, Any] = 2 __UpperCamelCase : List[Any] = torch.pow(torch.pow(snake_case__ , snake_case__ ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20 if not args.dont_normalize_global_importance: __UpperCamelCase : Tuple = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info("Attention entropies" ) print_ad_tensor(snake_case__ ) if compute_importance: logger.info("Head importance scores" ) print_ad_tensor(snake_case__ ) logger.info("Head ranked by importance scores" ) __UpperCamelCase : Tuple = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) __UpperCamelCase : Union[str, Any] = torch.arange( head_importance.numel() , device=args.device ) __UpperCamelCase : str = head_ranks.view_as(snake_case__ ) print_ad_tensor(snake_case__ ) return attn_entropy, head_importance, total_loss def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ): __UpperCamelCase : Any = compute_heads_importance(snake_case__ , snake_case__ , snake_case__ , compute_entropy=snake_case__ ) __UpperCamelCase : Tuple = 1 / loss # instead of downsteam score use the LM loss logger.info("Pruning: original score: %f, threshold: %f" , snake_case__ , original_score * args.masking_threshold ) __UpperCamelCase : Optional[Any] = torch.ones_like(snake_case__ ) __UpperCamelCase : Union[str, Any] = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) __UpperCamelCase : Dict = original_score while current_score >= original_score * args.masking_threshold: __UpperCamelCase : int = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads __UpperCamelCase : List[Any] = float("Inf" ) __UpperCamelCase : Union[str, Any] = head_importance.view(-1 ).sort()[1] if len(snake_case__ ) <= num_to_mask: print("BREAK BY num_to_mask" ) break # mask heads __UpperCamelCase : int = current_heads_to_mask[:num_to_mask] logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist() ) ) __UpperCamelCase : int = new_head_mask.view(-1 ) __UpperCamelCase : int = 0.0 __UpperCamelCase : Union[str, Any] = new_head_mask.view_as(snake_case__ ) __UpperCamelCase : List[str] = new_head_mask.clone().detach() print_ad_tensor(snake_case__ ) # Compute metric and head importance again __UpperCamelCase : Any = compute_heads_importance( snake_case__ , snake_case__ , snake_case__ , compute_entropy=snake_case__ , head_mask=snake_case__ ) __UpperCamelCase : Dict = 1 / loss logger.info( "Masking: current score: %f, remaining heads %d (%.1f percents)" , snake_case__ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , ) logger.info("Final head mask" ) print_ad_tensor(snake_case__ ) np.save(os.path.join(args.output_dir , "head_mask.npy" ) , head_mask.detach().cpu().numpy() ) return head_mask def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ): __UpperCamelCase : Any = datetime.now() __UpperCamelCase : str = compute_heads_importance( snake_case__ , snake_case__ , snake_case__ , compute_entropy=snake_case__ , compute_importance=snake_case__ , head_mask=snake_case__ ) __UpperCamelCase : Tuple = 1 / loss __UpperCamelCase : Dict = datetime.now() - before_time __UpperCamelCase : Union[str, Any] = sum(p.numel() for p in model.parameters() ) __UpperCamelCase : Optional[Any] = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() 
for layer in range(len(snake_case__ ) ) } for k, v in heads_to_prune.items(): if isinstance(snake_case__ , snake_case__ ): __UpperCamelCase : Any = [ v, ] assert sum(len(snake_case__ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(snake_case__ ) __UpperCamelCase : Dict = sum(p.numel() for p in model.parameters() ) __UpperCamelCase : Tuple = datetime.now() __UpperCamelCase : Dict = compute_heads_importance( snake_case__ , snake_case__ , snake_case__ , compute_entropy=snake_case__ , compute_importance=snake_case__ , head_mask=snake_case__ , actually_pruned=snake_case__ , ) __UpperCamelCase : Any = 1 / loss __UpperCamelCase : int = datetime.now() - before_time logger.info( "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" , snake_case__ , snake_case__ , pruned_num_params / original_num_params * 100 , ) logger.info("Pruning: score with masking: %f score with pruning: %f" , snake_case__ , snake_case__ ) logger.info("Pruning: speed ratio (original timing / new timing): %f percents" , original_time / new_time * 100 ) save_model(snake_case__ , args.output_dir ) def __lowerCAmelCase ( ): __UpperCamelCase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( "--data_dir" , default=snake_case__ , type=snake_case__ , required=snake_case__ , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , ) parser.add_argument( "--model_name_or_path" , default=snake_case__ , type=snake_case__ , required=snake_case__ , help="Path to pretrained model or model identifier from huggingface.co/models" , ) parser.add_argument( "--output_dir" , default=snake_case__ , type=snake_case__ , required=snake_case__ , help="The output directory where the model predictions and checkpoints will be written." , ) # Other parameters parser.add_argument( "--config_name" , default="" , type=snake_case__ , help="Pretrained config name or path if not the same as model_name_or_path" , ) parser.add_argument( "--tokenizer_name" , default="" , type=snake_case__ , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , ) parser.add_argument( "--cache_dir" , default=snake_case__ , type=snake_case__ , help="Where do you want to store the pre-trained models downloaded from s3" , ) parser.add_argument( "--data_subset" , type=snake_case__ , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." ) parser.add_argument( "--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" ) parser.add_argument( "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" ) parser.add_argument( "--dont_normalize_importance_by_layer" , action="store_true" , help="Don\'t normalize importance score by layers" ) parser.add_argument( "--dont_normalize_global_importance" , action="store_true" , help="Don\'t normalize all importance scores between 0 and 1" , ) parser.add_argument( "--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." ) parser.add_argument( "--masking_threshold" , default=0.9 , type=snake_case__ , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , ) parser.add_argument( "--masking_amount" , default=0.1 , type=snake_case__ , help="Amount to heads to masking at each masking step." 
) parser.add_argument("--metric_name" , default="acc" , type=snake_case__ , help="Metric to use for head masking." ) parser.add_argument( "--max_seq_length" , default=128 , type=snake_case__ , help=( "The maximum total input sequence length after WordPiece tokenization. \n" "Sequences longer than this will be truncated, sequences shorter padded." ) , ) parser.add_argument("--batch_size" , default=1 , type=snake_case__ , help="Batch size." ) parser.add_argument("--seed" , type=snake_case__ , default=42 ) parser.add_argument("--local_rank" , type=snake_case__ , default=-1 , help="local_rank for distributed training on gpus" ) parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" ) parser.add_argument("--server_ip" , type=snake_case__ , default="" , help="Can be used for distant debugging." ) parser.add_argument("--server_port" , type=snake_case__ , default="" , help="Can be used for distant debugging." ) __UpperCamelCase : Optional[int] = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach" ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=snake_case__ ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: __UpperCamelCase : List[Any] = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" ) __UpperCamelCase : Optional[Any] = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) __UpperCamelCase : int = torch.device("cuda" , args.local_rank ) __UpperCamelCase : List[str] = 1 torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) __UpperCamelCase : Any = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: __UpperCamelCase : List[str] = nn.parallel.DistributedDataParallel( snake_case__ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=snake_case__ ) elif args.n_gpu > 1: __UpperCamelCase : Optional[int] = nn.DataParallel(snake_case__ ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=snake_case__ ) torch.save(snake_case__ , os.path.join(args.output_dir , "run_args.bin" ) ) logger.info("Training/evaluation parameters %s" , snake_case__ ) # Prepare dataset __UpperCamelCase : Optional[Any] = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) __UpperCamelCase : List[str] = (torch.from_numpy(snake_case__ ),) __UpperCamelCase : int = TensorDataset(*snake_case__ ) __UpperCamelCase : Union[str, Any] = RandomSampler(snake_case__ ) __UpperCamelCase : Any = DataLoader(snake_case__ , sampler=snake_case__ , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(snake_case__ , snake_case__ , snake_case__ ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: __UpperCamelCase : Dict = mask_heads(snake_case__ 
, snake_case__ , snake_case__ ) prune_heads(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) if __name__ == "__main__": main()
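# Example run of the masking/pruning script above (script name and paths are
# hypothetical). --data_dir must point at a file that np.loadtxt can read as
# int64 token ids:
#
#   python run_gpt2_head_pruning.py \
#       --model_name_or_path gpt2 \
#       --data_dir ./token_ids.txt \
#       --output_dir ./pruned-gpt2 \
#       --try_masking --masking_threshold 0.9 --masking_amount 0.1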
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
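# The guards above turn a missing backend into a lazy failure at attribute access
# time rather than an error at package import. A caller can still probe
# availability explicitly before touching torch-only classes (sketch):
#
#   from transformers.utils import is_torch_available
#
#   if is_torch_available():
#       from transformers import ReformerModel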
import datasets

from .nmt_bleu import compute_bleu  # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py


_CITATION = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"

_DESCRIPTION = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"

_KWARGS_DESCRIPTION = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
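# Usage sketch for the metric above, including the smoothing knob from
# Lin & Och (2004):
#
#   import datasets
#
#   bleu = datasets.load_metric("bleu")
#   results = bleu.compute(
#       predictions=[["the", "cat", "sat"]],
#       references=[[["the", "cat", "sat", "down"]]],
#       max_order=2,
#       smooth=True,
#   )
#   print(results["bleu"], results["brevity_penalty"])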
import itertools import random import unittest import numpy as np from transformers import BatchFeature, SpeechTaFeatureExtractor from transformers.testing_utils import require_torch from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch a_ :Any = random.Random() def lowercase_ (A : int , A : Union[str, Any]=1.0 , A : List[str]=None , A : Any=None ): if rng is None: snake_case__ : List[str] = global_rng snake_case__ : int = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch class snake_case__ ( unittest.TestCase ): """simple docstring""" def __init__( self : Optional[Any], _snake_case : List[str], _snake_case : Tuple=7, _snake_case : Union[str, Any]=4_0_0, _snake_case : Any=2_0_0_0, _snake_case : Dict=1, _snake_case : Optional[Any]=0.0, _snake_case : List[Any]=1_6_0_0_0, _snake_case : List[Any]=True, _snake_case : List[Any]=8_0, _snake_case : Dict=1_6, _snake_case : str=6_4, _snake_case : Tuple="hann_window", _snake_case : Union[str, Any]=8_0, _snake_case : Optional[Any]=7_6_0_0, _snake_case : str=1e-10, _snake_case : Any=True, ) ->Union[str, Any]: snake_case__ : Optional[int] = parent snake_case__ : Optional[Any] = batch_size snake_case__ : List[Any] = min_seq_length snake_case__ : List[Any] = max_seq_length snake_case__ : Any = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) snake_case__ : Tuple = feature_size snake_case__ : List[Any] = padding_value snake_case__ : Any = sampling_rate snake_case__ : Dict = do_normalize snake_case__ : Union[str, Any] = num_mel_bins snake_case__ : Any = hop_length snake_case__ : Any = win_length snake_case__ : Any = win_function snake_case__ : Optional[int] = fmin snake_case__ : int = fmax snake_case__ : Union[str, Any] = mel_floor snake_case__ : Union[str, Any] = return_attention_mask def lowercase_ ( self : Optional[int] ) ->List[str]: return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "do_normalize": self.do_normalize, "num_mel_bins": self.num_mel_bins, "hop_length": self.hop_length, "win_length": self.win_length, "win_function": self.win_function, "fmin": self.fmin, "fmax": self.fmax, "mel_floor": self.mel_floor, "return_attention_mask": self.return_attention_mask, } def lowercase_ ( self : Any, _snake_case : Optional[Any]=False, _snake_case : List[str]=False ) ->Union[str, Any]: def _flatten(_snake_case : List[str] ): return list(itertools.chain(*_snake_case ) ) if equal_length: snake_case__ : Any = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size snake_case__ : int = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff ) ] if numpify: snake_case__ : Any = [np.asarray(_snake_case ) for x in speech_inputs] return speech_inputs def lowercase_ ( self : Union[str, Any], _snake_case : str=False, _snake_case : Dict=False ) ->List[str]: if equal_length: snake_case__ : Optional[Any] = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size snake_case__ : List[str] = [ floats_list((x, self.num_mel_bins) ) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff ) ] if numpify: snake_case__ : int = 
[np.asarray(_snake_case ) for x in speech_inputs] return speech_inputs @require_torch class snake_case__ ( lowerCAmelCase_ , unittest.TestCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = SpeechTaFeatureExtractor def lowercase_ ( self : int ) ->Union[str, Any]: snake_case__ : List[str] = SpeechTaFeatureExtractionTester(self ) def lowercase_ ( self : Any, _snake_case : Dict ) ->Any: self.assertTrue(np.all(np.mean(_snake_case, axis=0 ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(_snake_case, axis=0 ) - 1 ) < 1e-3 ) ) def lowercase_ ( self : List[Any] ) ->Union[str, Any]: # Tests that all call wrap to encode_plus and batch_encode_plus snake_case__ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 snake_case__ : int = [floats_list((1, x) )[0] for x in range(8_0_0, 1_4_0_0, 2_0_0 )] snake_case__ : Tuple = [np.asarray(_snake_case ) for speech_input in speech_inputs] # Test not batched input snake_case__ : str = feat_extract(speech_inputs[0], return_tensors='np' ).input_values snake_case__ : List[str] = feat_extract(np_speech_inputs[0], return_tensors='np' ).input_values self.assertTrue(np.allclose(_snake_case, _snake_case, atol=1e-3 ) ) # Test batched snake_case__ : Any = feat_extract(_snake_case, return_tensors='np' ).input_values snake_case__ : Union[str, Any] = feat_extract(_snake_case, return_tensors='np' ).input_values for enc_seq_a, enc_seq_a in zip(_snake_case, _snake_case ): self.assertTrue(np.allclose(_snake_case, _snake_case, atol=1e-3 ) ) def lowercase_ ( self : int ) ->Optional[int]: snake_case__ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) snake_case__ : Tuple = [floats_list((1, x) )[0] for x in range(8_0_0, 1_4_0_0, 2_0_0 )] snake_case__ : int = ['longest', 'max_length', 'do_not_pad'] snake_case__ : List[str] = [None, 1_6_0_0, None] for max_length, padding in zip(_snake_case, _snake_case ): snake_case__ : Optional[int] = feat_extract(_snake_case, padding=_snake_case, max_length=_snake_case, return_tensors='np' ) snake_case__ : Optional[int] = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:8_0_0] ) self.assertTrue(input_values[0][8_0_0:].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] ) self.assertTrue(input_values[0][1_0_0_0:].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] ) def lowercase_ ( self : Union[str, Any] ) ->Optional[Any]: snake_case__ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) snake_case__ : Tuple = range(8_0_0, 1_4_0_0, 2_0_0 ) snake_case__ : Optional[Any] = [floats_list((1, x) )[0] for x in lengths] snake_case__ : Union[str, Any] = ['longest', 'max_length', 'do_not_pad'] snake_case__ : str = [None, 1_6_0_0, None] for max_length, padding in zip(_snake_case, _snake_case ): snake_case__ : List[str] = feat_extract(_snake_case, max_length=_snake_case, padding=_snake_case ) snake_case__ : Tuple = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:8_0_0] ) self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] ) self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] ) def lowercase_ ( self : List[Any] ) ->Optional[Any]: snake_case__ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) snake_case__ : str = [floats_list((1, x) )[0] for x in range(8_0_0, 1_4_0_0, 2_0_0 )] 
snake_case__ : Optional[Any] = feat_extract( _snake_case, truncation=_snake_case, max_length=1_0_0_0, padding='max_length', return_tensors='np' ) snake_case__ : int = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :8_0_0] ) self._check_zero_mean_unit_variance(input_values[1] ) self._check_zero_mean_unit_variance(input_values[2] ) def lowercase_ ( self : int ) ->Union[str, Any]: snake_case__ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) snake_case__ : Dict = [floats_list((1, x) )[0] for x in range(8_0_0, 1_4_0_0, 2_0_0 )] snake_case__ : str = feat_extract( _snake_case, truncation=_snake_case, max_length=1_0_0_0, padding='longest', return_tensors='np' ) snake_case__ : Dict = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :8_0_0] ) self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertTrue(input_values.shape == (3, 1_0_0_0) ) snake_case__ : Tuple = [floats_list((1, x) )[0] for x in range(8_0_0, 1_4_0_0, 2_0_0 )] snake_case__ : List[str] = feat_extract( _snake_case, truncation=_snake_case, max_length=2_0_0_0, padding='longest', return_tensors='np' ) snake_case__ : Optional[Any] = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :8_0_0] ) self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length > longest -> then pad to longest self.assertTrue(input_values.shape == (3, 1_2_0_0) ) def lowercase_ ( self : List[str] ) ->Dict: snake_case__ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) snake_case__ : List[Any] = np.random.rand(1_0_0 ).astype(np.floataa ) snake_case__ : int = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: snake_case__ : int = feature_extractor.pad([{'input_values': inputs}], return_tensors='np' ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) snake_case__ : Optional[int] = feature_extractor.pad([{'input_values': inputs}], return_tensors='pt' ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def lowercase_ ( self : Optional[int] ) ->Optional[Any]: # Tests that all call wrap to encode_plus and batch_encode_plus snake_case__ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 snake_case__ : List[Any] = [floats_list((1, x) )[0] for x in range(8_0_0, 1_4_0_0, 2_0_0 )] snake_case__ : Dict = [np.asarray(_snake_case ) for speech_input in speech_inputs] # Test feature size snake_case__ : Optional[int] = feature_extractor(audio_target=_snake_case, padding=_snake_case, return_tensors='np' ).input_values self.assertTrue(input_values.ndim == 3 ) self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins ) # Test not batched input snake_case__ : Dict = feature_extractor(speech_inputs[0], return_tensors='np' ).input_values snake_case__ : Any = feature_extractor(np_speech_inputs[0], return_tensors='np' ).input_values self.assertTrue(np.allclose(_snake_case, _snake_case, atol=1e-3 ) ) # Test batched snake_case__ : Dict = feature_extractor(_snake_case, return_tensors='np' ).input_values snake_case__ : Dict = feature_extractor(_snake_case, return_tensors='np' ).input_values for enc_seq_a, enc_seq_a in zip(_snake_case, 
_snake_case ): self.assertTrue(np.allclose(_snake_case, _snake_case, atol=1e-3 ) ) # Test 2-D numpy arrays are batched. snake_case__ : Optional[Any] = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)] snake_case__ : int = np.asarray(_snake_case ) snake_case__ : Union[str, Any] = feature_extractor(_snake_case, return_tensors='np' ).input_values snake_case__ : Union[str, Any] = feature_extractor(_snake_case, return_tensors='np' ).input_values for enc_seq_a, enc_seq_a in zip(_snake_case, _snake_case ): self.assertTrue(np.allclose(_snake_case, _snake_case, atol=1e-3 ) ) def lowercase_ ( self : Union[str, Any] ) ->str: snake_case__ : int = self.feat_extract_tester.prepare_inputs_for_target() snake_case__ : List[str] = self.feature_extraction_class(**self.feat_extract_dict ) snake_case__ : Optional[Any] = feat_extract.model_input_names[0] snake_case__ : Tuple = BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(_snake_case ) == len(_snake_case ) for x, y in zip(_snake_case, processed_features[input_name] ) ) ) snake_case__ : int = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_snake_case ) snake_case__ : Union[str, Any] = BatchFeature({input_name: speech_inputs}, tensor_type='np' ) snake_case__ : Dict = processed_features[input_name] if len(batch_features_input.shape ) < 3: snake_case__ : List[str] = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) ) @require_torch def lowercase_ ( self : List[str] ) ->Any: snake_case__ : int = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_snake_case ) snake_case__ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict ) snake_case__ : Tuple = feat_extract.model_input_names[0] snake_case__ : List[Any] = BatchFeature({input_name: speech_inputs}, tensor_type='pt' ) snake_case__ : Tuple = processed_features[input_name] if len(batch_features_input.shape ) < 3: snake_case__ : Any = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) ) @require_torch def lowercase_ ( self : Optional[int] ) ->Tuple: snake_case__ : Dict = self.feature_extraction_class(**self.feat_extract_dict ) snake_case__ : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_target() snake_case__ : Optional[Any] = feat_extract.model_input_names[0] snake_case__ : List[str] = BatchFeature({input_name: speech_inputs} ) snake_case__ : int = feat_extract.num_mel_bins # hack! snake_case__ : Tuple = feat_extract.pad(_snake_case, padding='longest', return_tensors='np' )[input_name] snake_case__ : Union[str, Any] = feat_extract.pad(_snake_case, padding='longest', return_tensors='pt' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 ) def lowercase_ ( self : int ) ->Any: snake_case__ : Any = self.feat_extract_dict snake_case__ : List[Any] = True snake_case__ : Union[str, Any] = self.feature_extraction_class(**_snake_case ) snake_case__ : Any = self.feat_extract_tester.prepare_inputs_for_target() snake_case__ : List[Any] = [len(_snake_case ) for x in speech_inputs] snake_case__ : Union[str, Any] = feat_extract.model_input_names[0] snake_case__ : Optional[int] = BatchFeature({input_name: speech_inputs} ) snake_case__ : List[str] = feat_extract.num_mel_bins # hack! 
snake_case__ : str = feat_extract.pad(_snake_case, padding='longest', return_tensors='np' ) self.assertIn('attention_mask', _snake_case ) self.assertListEqual(list(processed.attention_mask.shape ), list(processed[input_name].shape[:2] ) ) self.assertListEqual(processed.attention_mask.sum(-1 ).tolist(), _snake_case ) def lowercase_ ( self : Optional[int] ) ->str: snake_case__ : int = self.feat_extract_dict snake_case__ : List[str] = True snake_case__ : Tuple = self.feature_extraction_class(**_snake_case ) snake_case__ : List[str] = self.feat_extract_tester.prepare_inputs_for_target() snake_case__ : str = [len(_snake_case ) for x in speech_inputs] snake_case__ : Optional[Any] = feat_extract.model_input_names[0] snake_case__ : Optional[int] = BatchFeature({input_name: speech_inputs} ) snake_case__ : Optional[Any] = min(_snake_case ) snake_case__ : Union[str, Any] = feat_extract.num_mel_bins # hack! snake_case__ : Tuple = feat_extract.pad( _snake_case, padding='max_length', max_length=_snake_case, truncation=_snake_case, return_tensors='np' ) self.assertIn('attention_mask', _snake_case ) self.assertListEqual( list(processed_pad.attention_mask.shape ), [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist(), [max_length for x in speech_inputs] ) def lowercase_ ( self : List[Any], _snake_case : Optional[int] ) ->Optional[Any]: from datasets import load_dataset snake_case__ : str = load_dataset('hf-internal-testing/librispeech_asr_dummy', 'clean', split='validation' ) # automatic decoding with librispeech snake_case__ : Dict = ds.sort('id' ).select(range(_snake_case ) )[:num_samples]['audio'] return [x["array"] for x in speech_samples] def lowercase_ ( self : str ) ->str: # fmt: off snake_case__ : List[Any] = torch.tensor( [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03, 3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03, 2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04, 4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03, 7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04, 4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03] ) # fmt: on snake_case__ : Union[str, Any] = self._load_datasamples(1 ) snake_case__ : Optional[int] = SpeechTaFeatureExtractor() snake_case__ : List[Any] = feature_extractor(_snake_case, return_tensors='pt' ).input_values self.assertEquals(input_values.shape, (1, 9_3_6_8_0) ) self.assertTrue(torch.allclose(input_values[0, :3_0], _snake_case, atol=1e-6 ) ) def lowercase_ ( self : Any ) ->str: # fmt: off snake_case__ : Optional[Any] = torch.tensor( [-2.6_8_7_0, -3.0_1_0_4, -3.1_3_5_6, -3.5_3_5_2, -3.0_0_4_4, -3.0_3_5_3, -3.4_7_1_9, -3.6_7_7_7, -3.1_5_2_0, -2.9_4_3_5, -2.6_5_5_3, -2.8_7_9_5, -2.9_9_4_4, -2.5_9_2_1, -3.0_2_7_9, -3.0_3_8_6, -3.0_8_6_4, -3.1_2_9_1, -3.2_3_5_3, -2.7_4_4_4, -2.6_8_3_1, -2.7_2_8_7, -3.1_7_6_1, -3.1_5_7_1, -3.2_7_2_6, -3.0_5_8_2, -3.1_0_0_7, -3.4_5_3_3, -3.4_6_9_5, -3.0_9_9_8] ) # fmt: on snake_case__ : List[str] = self._load_datasamples(1 ) snake_case__ : str = SpeechTaFeatureExtractor() snake_case__ : Optional[Any] = feature_extractor(audio_target=_snake_case, return_tensors='pt' ).input_values self.assertEquals(input_values.shape, (1, 3_6_6, 8_0) ) self.assertTrue(torch.allclose(input_values[0, 0, :3_0], _snake_case, atol=1e-4 ) )
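# The tests above exercise both extractor paths: raw waveforms in, and
# mel-spectrogram targets via audio_target. A standalone sketch of the first path
# (the test file imports the class as SpeechTaFeatureExtractor; upstream the same
# class is named SpeechT5FeatureExtractor):
#
#   import numpy as np
#   from transformers import SpeechTaFeatureExtractor
#
#   extractor = SpeechTaFeatureExtractor()
#   waveform = (np.random.randn(16000) * 0.1).astype(np.float32)  # 1 s at 16 kHz
#   inputs = extractor(waveform, sampling_rate=16000, return_tensors="pt")
#   print(inputs.input_values.shape)  # torch.Size([1, 16000])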
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """A string can be rearranged into a palindrome iff at most one character
    occurs an odd number of times (spaces and case are ignored)."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: stores the frequency of every character in the input string
    character_freq_dict: dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    # At most one character may appear an odd number of times.
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
        if odd_char > 1:
            return False
    return True


def benchmark(input_str: str = "") -> None:
    """Benchmark both implementations on the given string."""
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"""{check_str} can {"" if status else "not "}be rearranged as a palindrome""")
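# Quick sanity checks for both implementations:
#
#   assert can_string_be_rearranged_as_palindrome_counter("Momo")       # "moom"
#   assert can_string_be_rearranged_as_palindrome("never odd or even")  # spaces ignored
#   assert not can_string_be_rearranged_as_palindrome_counter("abc")    # a, b, c all odd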
339
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    """Summarizes an English text with a seq2seq model."""

    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
277
0
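The tool above only wires encode/forward/decode around a seq2seq checkpoint; the same flow written directly against the public transformers API looks roughly like this (a sketch that downloads the checkpoint on first run):

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("philschmid/bart-large-cnn-samsum")
model = AutoModelForSeq2SeqLM.from_pretrained("philschmid/bart-large-cnn-samsum")

inputs = tokenizer("Some long English text to summarize ...", return_tensors="pt", truncation=True)
summary_ids = model.generate(**inputs)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))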
import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.esm.modeling_esmfold import EsmForProteinFolding class UpperCamelCase_ : '''simple docstring''' def __init__( self , a , a=13 , a=7 , a=False , a=True , a=False , a=False , a=19 , a=32 , a=5 , a=4 , a=37 , a="gelu" , a=0.1 , a=0.1 , a=5_12 , a=16 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Optional[int]: snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_input_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = type_sequence_label_size snake_case_ = initializer_range snake_case_ = num_labels snake_case_ = num_choices snake_case_ = scope def _UpperCamelCase ( self ) -> int: snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_input_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = None snake_case_ = None snake_case_ = None if self.use_labels: snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case_ = ids_tensor([self.batch_size] , self.num_choices ) snake_case_ = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _UpperCamelCase ( self ) -> Optional[Any]: snake_case_ = EsmConfig( vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=_snake_case , esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False} , ) return config def _UpperCamelCase ( self , a , a , a , a , a , a ) -> str: snake_case_ = EsmForProteinFolding(config=_snake_case ).float() model.to(_snake_case ) model.eval() snake_case_ = model(_snake_case , attention_mask=_snake_case ) snake_case_ = model(_snake_case ) snake_case_ = model(_snake_case ) self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) ) self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) ) def _UpperCamelCase ( self ) -> Any: snake_case_ = self.prepare_config_and_inputs() ( snake_case_ ) = config_and_inputs snake_case_ = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class UpperCamelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase = False lowerCAmelCase = (EsmForProteinFolding,) if 
is_torch_available() else () lowerCAmelCase = () lowerCAmelCase = {} if is_torch_available() else {} lowerCAmelCase = False def _UpperCamelCase ( self ) -> Optional[Any]: snake_case_ = EsmFoldModelTester(self ) snake_case_ = ConfigTester(self , config_class=_snake_case , hidden_size=37 ) def _UpperCamelCase ( self ) -> List[Any]: self.config_tester.run_common_tests() def _UpperCamelCase ( self ) -> Tuple: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) @unittest.skip('Does not support attention outputs' ) def _UpperCamelCase ( self ) -> Dict: pass @unittest.skip def _UpperCamelCase ( self ) -> int: pass @unittest.skip('Esm does not support embedding resizing' ) def _UpperCamelCase ( self ) -> Dict: pass @unittest.skip('Esm does not support embedding resizing' ) def _UpperCamelCase ( self ) -> List[Any]: pass @unittest.skip('ESMFold does not support passing input embeds!' ) def _UpperCamelCase ( self ) -> List[str]: pass @unittest.skip('ESMFold does not support head pruning.' ) def _UpperCamelCase ( self ) -> Union[str, Any]: pass @unittest.skip('ESMFold does not support head pruning.' ) def _UpperCamelCase ( self ) -> Union[str, Any]: pass @unittest.skip('ESMFold does not support head pruning.' ) def _UpperCamelCase ( self ) -> Optional[Any]: pass @unittest.skip('ESMFold does not support head pruning.' ) def _UpperCamelCase ( self ) -> Union[str, Any]: pass @unittest.skip('ESMFold does not support head pruning.' ) def _UpperCamelCase ( self ) -> Optional[int]: pass @unittest.skip('ESMFold does not output hidden states in the normal way.' ) def _UpperCamelCase ( self ) -> Optional[Any]: pass @unittest.skip('ESMfold does not output hidden states in the normal way.' ) def _UpperCamelCase ( self ) -> Union[str, Any]: pass @unittest.skip('ESMFold only has one output format.' ) def _UpperCamelCase ( self ) -> Tuple: pass @unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality' ) def _UpperCamelCase ( self ) -> Dict: pass @unittest.skip('ESMFold does not support input chunking.' ) def _UpperCamelCase ( self ) -> List[str]: pass @unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.' ) def _UpperCamelCase ( self ) -> Optional[Any]: pass @unittest.skip('ESMFold doesn\'t support torchscript compilation.' ) def _UpperCamelCase ( self ) -> List[Any]: pass @unittest.skip('ESMFold doesn\'t support torchscript compilation.' ) def _UpperCamelCase ( self ) -> List[Any]: pass @unittest.skip('ESMFold doesn\'t support torchscript compilation.' ) def _UpperCamelCase ( self ) -> Dict: pass @unittest.skip('ESMFold doesn\'t support data parallel.' ) def _UpperCamelCase ( self ) -> List[str]: pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def _UpperCamelCase ( self ) -> Union[str, Any]: pass @require_torch class UpperCamelCase_ ( lowerCAmelCase_ ): '''simple docstring''' @slow def _UpperCamelCase ( self ) -> Optional[Any]: snake_case_ = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1' ).float() model.eval() snake_case_ = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) snake_case_ = model(_snake_case )['positions'] snake_case_ = torch.tensor([2.5_828, 0.7_993, -10.93_34] , dtype=torch.floataa ) self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , _snake_case , atol=1E-4 ) )
178
import argparse

import torch

from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
277
0
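The model tester above builds random batches with ids_tensor / random_attention_mask helpers; a minimal stand-in for those helpers, assuming only torch:

import torch


def ids_tensor(shape, vocab_size):
    # Random token ids in [0, vocab_size), as used for input_ids above.
    return torch.randint(0, vocab_size, shape, dtype=torch.long)


def random_attention_mask(shape):
    mask = torch.randint(0, 2, shape, dtype=torch.long)
    mask[:, 0] = 1  # make sure every row attends to at least one token
    return mask


input_ids = ids_tensor((13, 7), vocab_size=33)
attention_mask = random_attention_mask((13, 7))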
"""simple docstring""" import argparse import torch from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert from transformers.utils import logging logging.set_verbosity_info() def lowercase (SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any ) -> Optional[Any]: # Initialise PyTorch model SCREAMING_SNAKE_CASE = LxmertConfig.from_json_file(SCREAMING_SNAKE_CASE_ ) print(F'Building PyTorch model from configuration: {config}' ) SCREAMING_SNAKE_CASE = LxmertForPreTraining(SCREAMING_SNAKE_CASE_ ) # Load weights from tf checkpoint load_tf_weights_in_lxmert(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Save pytorch-model print(F'Save PyTorch model to {pytorch_dump_path}' ) torch.save(model.state_dict() , SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) __UpperCamelCase = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
113
import argparse import fairseq import torch from torch import nn from transformers import ( MBartaaTokenizer, MBartConfig, MBartForCausalLM, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() a_ :Tuple = logging.get_logger(__name__) a_ :List[Any] = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } a_ :Optional[int] = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def lowercase_ (A : Union[str, Any] , A : str , A : Dict , A : Optional[Any] , A : Optional[Any] ): for attribute in key.split('.' ): snake_case__ : Any = getattr(A , A ) if weight_type is not None: snake_case__ : Optional[Any] = getattr(A , A ).shape else: snake_case__ : Optional[int] = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": snake_case__ : Tuple = value elif weight_type == "weight_g": snake_case__ : Tuple = value elif weight_type == "weight_v": snake_case__ : List[Any] = value elif weight_type == "bias": snake_case__ : List[Any] = value else: snake_case__ : Optional[Any] = value logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def lowercase_ (A : str , A : Any ): snake_case__ : Union[str, Any] = [] snake_case__ : Union[str, Any] = fairseq_model.state_dict() snake_case__ : Union[str, Any] = hf_model.feature_extractor snake_case__ : Any = hf_model.adapter for name, value in fairseq_dict.items(): snake_case__ : Any = False if "conv_layers" in name: load_conv_layer( A , A , A , A , hf_model.config.feat_extract_norm == 'group' , ) snake_case__ : List[Any] = True elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.'] ): load_adapter(A , A , A , A ) snake_case__ : Optional[Any] = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: snake_case__ : Tuple = True if "*" in mapped_key: snake_case__ : List[Any] = name.split(A )[0].split('.' 
)[-2] snake_case__ : Optional[int] = mapped_key.replace('*' , A ) if "weight_g" in name: snake_case__ : Optional[int] = 'weight_g' elif "weight_v" in name: snake_case__ : Optional[Any] = 'weight_v' elif "bias" in name: snake_case__ : Union[str, Any] = 'bias' elif "weight" in name: snake_case__ : Optional[int] = 'weight' else: snake_case__ : Tuple = None set_recursively(A , A , A , A , A ) continue if not is_used: unused_weights.append(A ) logger.warning(F'''Unused weights: {unused_weights}''' ) def lowercase_ (A : Union[str, Any] , A : Any , A : str , A : str , A : int ): snake_case__ : str = full_name.split('conv_layers.' )[-1] snake_case__ : Optional[int] = name.split('.' ) snake_case__ : Tuple = int(items[0] ) snake_case__ : Any = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) snake_case__ : Union[str, Any] = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) snake_case__ : Union[str, Any] = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." ) snake_case__ : Optional[int] = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) snake_case__ : Optional[Any] = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(A ) def lowercase_ (A : Optional[Any] , A : Any , A : Tuple , A : Any ): snake_case__ : List[str] = full_name.split('adaptor.' )[-1] snake_case__ : Tuple = name.split('.' 
) if items[1].isdigit(): snake_case__ : Optional[int] = int(items[1] ) else: snake_case__ : Any = None if "adaptor" not in full_name: if "proj_ln" in full_name: # has to be layer norm if "bias" in name: assert ( value.shape == adapter.proj_layer_norm.bias.data.shape ), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.''' snake_case__ : List[Any] = value logger.info(F'''Adapter proj layer norm bias was initialized from {full_name}.''' ) if "weight" in name: assert ( value.shape == adapter.proj_layer_norm.weight.data.shape ), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.''' snake_case__ : int = value else: # has to be projection layer if "bias" in name: assert ( value.shape == adapter.proj.bias.data.shape ), F'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.''' snake_case__ : str = value logger.info(F'''Adapter proj layer bias was initialized from {full_name}.''' ) if "weight" in name: assert ( value.shape == adapter.proj.weight.data.shape ), F'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.''' snake_case__ : Dict = value logger.info(F'''Adapter proj layer weight was initialized from {full_name}.''' ) elif isinstance(A , A ): if "bias" in name: assert ( value.shape == adapter.layers[layer_id].conv.bias.data.shape ), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.''' snake_case__ : List[str] = value logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''' ) elif "weight" in name: assert ( value.shape == adapter.layers[layer_id].conv.weight.data.shape ), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.''' snake_case__ : List[str] = value logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''' ) else: unused_weights.append(A ) def lowercase_ (A : int ): snake_case__ , snake_case__ : Union[str, Any] = emb.weight.shape snake_case__ : int = nn.Linear(A , A , bias=A ) snake_case__ : Optional[Any] = emb.weight.data return lin_layer @torch.no_grad() def lowercase_ (A : Tuple , A : Tuple , A : Any , A : Optional[Any] , A : int , A : Optional[Any] , A : Union[str, Any] , A : Union[str, Any] , A : Optional[Any] , A : List[Any] , A : Union[str, Any] , ): snake_case__ : Optional[Any] = WavaVecaConfig.from_pretrained( A , add_adapter=A , adapter_stride=A , adapter_kernel_size=A , use_auth_token=A , output_hidden_size=A , ) snake_case__ : Dict = MBartConfig.from_pretrained(A ) # load model snake_case__ , snake_case__ , snake_case__ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={ 'config_yaml': config_yaml_path, 'data': '/'.join(dict_path.split('/' )[:-1] ), 'w2v_path': checkpoint_path, 'load_pretrained_decoder_from': None, } , ) snake_case__ : List[Any] = model[0].eval() # load feature extractor snake_case__ : str = WavaVecaFeatureExtractor.from_pretrained(A , use_auth_token=A ) # set weights for wav2vec2 encoder snake_case__ : List[str] = WavaVecaModel(A ) recursively_load_weights_wavaveca(model.encoder , A ) # load decoder weights snake_case__ : Any = MBartForCausalLM(A ) snake_case__ , snake_case__ : int = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=A ) logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' ) logger.warning(F'''The following 
keys are unexpected when loading the decoder weights: {unexpected_keys}''' ) snake_case__ : Union[str, Any] = SpeechEncoderDecoderModel(encoder=A , decoder=A ) snake_case__ : str = False snake_case__ : int = MBartaaTokenizer(A ) tokenizer.save_pretrained(A ) snake_case__ : Any = hf_wavavec.config.to_dict() snake_case__ : Tuple = tokenizer.pad_token_id snake_case__ : Union[str, Any] = tokenizer.bos_token_id snake_case__ : Dict = tokenizer.eos_token_id snake_case__ : Optional[int] = 'mbart50' snake_case__ : Union[str, Any] = 'wav2vec2' snake_case__ : List[str] = tokenizer.eos_token_id snake_case__ : Union[str, Any] = 2_5_0_0_0_4 snake_case__ : int = tokenizer.eos_token_id snake_case__ : Union[str, Any] = SpeechEncoderDecoderConfig.from_dict(A ) hf_wavavec.save_pretrained(A ) feature_extractor.save_pretrained(A ) if __name__ == "__main__": a_ :str = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model") parser.add_argument( "--encoder_config_path", default="facebook/wav2vec2-xls-r-1b", type=str, help="Path to hf encoder wav2vec2 checkpoint config", ) parser.add_argument( "--decoder_config_path", default="facebook/mbart-large-50-one-to-many-mmt", type=str, help="Path to hf decoder checkpoint config", ) parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers") parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers") parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers") parser.add_argument("--encoder_output_dim", default=1_024, type=int, help="encoder output dim") parser.add_argument("--start_token_id", default=250_004, type=int, help="`decoder_start_token_id` of model config") a_ :Union[str, Any] = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, args.config_yaml_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, add_adapter=args.add_adapter, adapter_kernel_size=args.adapter_kernel_size, adapter_stride=args.adapter_stride, decoder_start_token_id=args.start_token_id, encoder_output_dim=args.encoder_output_dim, )
277
0
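The checkpoint-conversion script above copies each fairseq tensor into the HF module by walking a dotted key; a minimal sketch of that set_recursively idea (simplified: no weight_type dispatch and no shape assertion):

import torch
from torch import nn


def set_recursively(module, dotted_key, value):
    # Resolve "a.b.c" with getattr, then overwrite the leaf parameter's data.
    *path, leaf = dotted_key.split(".")
    for attr in path:
        module = getattr(module, attr)
    getattr(module, leaf).data = value


layer = nn.Linear(4, 4)
set_recursively(layer, "weight", torch.zeros(4, 4))
assert layer.weight.sum().item() == 0.0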
'''simple docstring'''

import json
import logging
import os
import socket

import git
import numpy as np
import torch


logging.basicConfig(
    format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
    datefmt='''%m/%d/%Y %H:%M:%S''',
    level=logging.INFO,
)
logger = logging.getLogger(__name__)


def git_log(folder_path: str) -> None:
    '''Log commit info.'''
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        'repo_id': str(repo),
        'repo_sha': str(repo.head.object.hexsha),
        'repo_branch': str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)


def init_gpu_params(params) -> None:
    '''Handle single and multi-GPU / multi-node setup.'''
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )


def set_seed(args) -> None:
    '''Set the random seed for numpy and torch.'''
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
229
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType


logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}


class DebertaV2Config(PretrainedConfig):
    """simple docstring"""

    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
277
0
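init_gpu_params above is mostly environment-variable plumbing; its core, sketched with the common torchrun-style variables (WORLD_SIZE / RANK / LOCAL_RANK are launcher conventions, not guaranteed by this script, which also reads N_GPU_NODE / N_NODES / NODE_RANK):

import os

world_size = int(os.environ.get("WORLD_SIZE", 1))
global_rank = int(os.environ.get("RANK", 0))
local_rank = int(os.environ.get("LOCAL_RANK", -1))
is_master = global_rank == 0
multi_gpu = world_size > 1
print(f"rank {global_rank}/{world_size} (local {local_rank}), master={is_master}, multi_gpu={multi_gpu}")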
"""simple docstring""" def lowercase ( a__ : str , a__ : List[Any] , a__ : Dict ) -> Optional[Any]: if n == 0: return 1 elif n % 2 == 1: return (binary_exponentiation(a__ , n - 1 , a__ ) * a) % mod else: _UpperCamelCase = binary_exponentiation(a__ , n / 2 , a__ ) return (b * b) % mod # a prime number UpperCAmelCase = 701 UpperCAmelCase = 1_000_000_000 UpperCAmelCase = 10 # using binary exponentiation function, O(log(p)): print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p) print((a / b) % p == (a * b ** (p - 2)) % p)
256
import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() a_ :str = logging.get_logger(__name__) def lowercase_ (A : str ): snake_case__ : Tuple = SwinConfig.from_pretrained( 'microsoft/swin-tiny-patch4-window7-224' , out_features=['stage1', 'stage2', 'stage3', 'stage4'] ) snake_case__ : List[Any] = MaskFormerConfig(backbone_config=A ) snake_case__ : Union[str, Any] = 'huggingface/label-files' if "ade20k-full" in model_name: # this should be ok snake_case__ : Dict = 8_4_7 snake_case__ : List[str] = 'maskformer-ade20k-full-id2label.json' elif "ade" in model_name: # this should be ok snake_case__ : Union[str, Any] = 1_5_0 snake_case__ : Any = 'ade20k-id2label.json' elif "coco-stuff" in model_name: # this should be ok snake_case__ : List[str] = 1_7_1 snake_case__ : Union[str, Any] = 'maskformer-coco-stuff-id2label.json' elif "coco" in model_name: # TODO snake_case__ : Dict = 1_3_3 snake_case__ : str = 'coco-panoptic-id2label.json' elif "cityscapes" in model_name: # this should be ok snake_case__ : List[str] = 1_9 snake_case__ : Union[str, Any] = 'cityscapes-id2label.json' elif "vistas" in model_name: # this should be ok snake_case__ : Tuple = 6_5 snake_case__ : List[str] = 'mapillary-vistas-id2label.json' snake_case__ : Dict = json.load(open(hf_hub_download(A , A , repo_type='dataset' ) , 'r' ) ) snake_case__ : List[str] = {int(A ): v for k, v in idalabel.items()} return config def lowercase_ (A : Any ): snake_case__ : Optional[int] = [] # stem # fmt: off rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight') ) rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias') ) rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight') ) rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) 
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((F'''backbone.layers.{i}.downsample.reduction.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((F'''backbone.layers.{i}.downsample.norm.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((F'''backbone.layers.{i}.downsample.norm.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append((F'''backbone.norm{i}.weight''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') ) rename_keys.append((F'''backbone.norm{i}.bias''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') ) # FPN rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight') ) rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight') ) rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias') ) for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ): rename_keys.append((F'''sem_seg_head.adapter_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') ) rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') ) rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') ) rename_keys.append((F'''sem_seg_head.layer_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') ) rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') ) rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') ) rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight') ) rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias') ) # Transformer decoder for idx in range(config.decoder_config.decoder_layers ): # self-attention out projection 
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') ) # cross-attention out projection rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') ) # MLP 1 rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') ) # MLP 2 rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') ) # layernorm 1 (self-attention layernorm) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') ) # layernorm 2 (cross-attention layernorm) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') ) # layernorm 3 (final layernorm) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') ) rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight') ) rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias') ) # heads on top rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight') ) rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight') ) rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias') ) rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight') ) rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias') ) for i in range(3 ): 
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', F'''mask_embedder.{i}.0.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', F'''mask_embedder.{i}.0.bias''') ) # fmt: on return rename_keys def lowercase_ (A : Tuple , A : Tuple , A : Optional[Any] ): snake_case__ : Optional[int] = dct.pop(A ) snake_case__ : Union[str, Any] = val def lowercase_ (A : Optional[Any] , A : Tuple ): snake_case__ : Optional[int] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): snake_case__ : Optional[int] = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) snake_case__ : int = state_dict.pop(F'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''' ) snake_case__ : Tuple = state_dict.pop(F'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict snake_case__ : str = in_proj_weight[:dim, :] snake_case__ : int = in_proj_bias[: dim] snake_case__ : List[Any] = in_proj_weight[ dim : dim * 2, : ] snake_case__ : List[str] = in_proj_bias[ dim : dim * 2 ] snake_case__ : List[Any] = in_proj_weight[ -dim :, : ] snake_case__ : Dict = in_proj_bias[-dim :] # fmt: on def lowercase_ (A : List[str] , A : List[Any] ): # fmt: off snake_case__ : str = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) snake_case__ : List[Any] = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''' ) snake_case__ : int = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict snake_case__ : Any = in_proj_weight[: hidden_size, :] snake_case__ : Tuple = in_proj_bias[:config.hidden_size] snake_case__ : List[str] = in_proj_weight[hidden_size : hidden_size * 2, :] snake_case__ : Dict = in_proj_bias[hidden_size : hidden_size * 2] snake_case__ : Any = in_proj_weight[-hidden_size :, :] snake_case__ : int = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) snake_case__ : List[Any] = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''' ) snake_case__ : List[str] = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict snake_case__ : Optional[int] = in_proj_weight[: hidden_size, :] snake_case__ : Optional[Any] = in_proj_bias[:config.hidden_size] snake_case__ : int = in_proj_weight[hidden_size : hidden_size * 2, :] snake_case__ : List[str] = in_proj_bias[hidden_size : hidden_size * 2] snake_case__ : List[str] = in_proj_weight[-hidden_size :, :] snake_case__ : str = in_proj_bias[-hidden_size :] # fmt: on def lowercase_ (): snake_case__ : Any = 'http://images.cocodataset.org/val2017/000000039769.jpg' snake_case__ : int = Image.open(requests.get(A , stream=A ).raw ) return im @torch.no_grad() def lowercase_ (A : str , A : str , A : str , A : bool = False ): snake_case__ : Optional[int] = get_maskformer_config(A 
) # load original state_dict with open(A , 'rb' ) as f: snake_case__ : List[Any] = pickle.load(A ) snake_case__ : Optional[int] = data['model'] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys snake_case__ : List[str] = create_rename_keys(A ) for src, dest in rename_keys: rename_key(A , A , A ) read_in_swin_q_k_v(A , config.backbone_config ) read_in_decoder_q_k_v(A , A ) # update to torch tensors for key, value in state_dict.items(): snake_case__ : int = torch.from_numpy(A ) # load 🤗 model snake_case__ : str = MaskFormerForInstanceSegmentation(A ) model.eval() for name, param in model.named_parameters(): print(A , param.shape ) snake_case__ , snake_case__ : Union[str, Any] = model.load_state_dict(A , strict=A ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(A ) == 0, F'''Unexpected keys: {unexpected_keys}''' # verify results snake_case__ : Optional[Any] = prepare_img() if "vistas" in model_name: snake_case__ : int = 6_5 elif "cityscapes" in model_name: snake_case__ : Dict = 6_5_5_3_5 else: snake_case__ : Tuple = 2_5_5 snake_case__ : Optional[int] = True if 'ade' in model_name else False snake_case__ : Dict = MaskFormerImageProcessor(ignore_index=A , reduce_labels=A ) snake_case__ : Any = image_processor(A , return_tensors='pt' ) snake_case__ : Any = model(**A ) print('Logits:' , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": snake_case__ : Tuple = torch.tensor( [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , A , atol=1e-4 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' ) Path(A ).mkdir(exist_ok=A ) model.save_pretrained(A ) image_processor.save_pretrained(A ) if push_to_hub: print('Pushing model and image processor to the hub...' ) model.push_to_hub(F'''nielsr/{model_name}''' ) image_processor.push_to_hub(F'''nielsr/{model_name}''' ) if __name__ == "__main__": a_ :Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="maskformer-swin-tiny-ade", type=str, help=("Name of the MaskFormer model you'd like to convert",), ) parser.add_argument( "--checkpoint_path", default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl", type=str, help="Path to the original state dict (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) a_ :Dict = parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
277
0
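read_in_swin_q_k_v in the conversion script above splits a fused (3*dim, dim) in-projection matrix into separate query/key/value weights; the slicing pattern in isolation, with a made-up dim:

import torch

dim = 96  # hypothetical hidden size of one Swin stage
in_proj_weight = torch.randn(3 * dim, dim)

q_weight = in_proj_weight[:dim, :]           # first third -> query
k_weight = in_proj_weight[dim : 2 * dim, :]  # middle third -> key
v_weight = in_proj_weight[-dim:, :]          # last third -> value
assert q_weight.shape == k_weight.shape == v_weight.shape == (dim, dim)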
import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _a = logging.get_logger(__name__) _a = { "b0": efficientnet.EfficientNetBa, "b1": efficientnet.EfficientNetBa, "b2": efficientnet.EfficientNetBa, "b3": efficientnet.EfficientNetBa, "b4": efficientnet.EfficientNetBa, "b5": efficientnet.EfficientNetBa, "b6": efficientnet.EfficientNetBa, "b7": efficientnet.EfficientNetBa, } _a = { "b0": { "hidden_dim": 1_2_8_0, "width_coef": 1.0, "depth_coef": 1.0, "image_size": 2_2_4, "dropout_rate": 0.2, "dw_padding": [], }, "b1": { "hidden_dim": 1_2_8_0, "width_coef": 1.0, "depth_coef": 1.1, "image_size": 2_4_0, "dropout_rate": 0.2, "dw_padding": [1_6], }, "b2": { "hidden_dim": 1_4_0_8, "width_coef": 1.1, "depth_coef": 1.2, "image_size": 2_6_0, "dropout_rate": 0.3, "dw_padding": [5, 8, 1_6], }, "b3": { "hidden_dim": 1_5_3_6, "width_coef": 1.2, "depth_coef": 1.4, "image_size": 3_0_0, "dropout_rate": 0.3, "dw_padding": [5, 1_8], }, "b4": { "hidden_dim": 1_7_9_2, "width_coef": 1.4, "depth_coef": 1.8, "image_size": 3_8_0, "dropout_rate": 0.4, "dw_padding": [6], }, "b5": { "hidden_dim": 2_0_4_8, "width_coef": 1.6, "depth_coef": 2.2, "image_size": 4_5_6, "dropout_rate": 0.4, "dw_padding": [1_3, 2_7], }, "b6": { "hidden_dim": 2_3_0_4, "width_coef": 1.8, "depth_coef": 2.6, "image_size": 5_2_8, "dropout_rate": 0.5, "dw_padding": [3_1], }, "b7": { "hidden_dim": 2_5_6_0, "width_coef": 2.0, "depth_coef": 3.1, "image_size": 6_0_0, "dropout_rate": 0.5, "dw_padding": [1_8], }, } def _a ( SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple: """simple docstring""" __lowerCAmelCase: List[Any] = EfficientNetConfig() __lowerCAmelCase: List[str] = CONFIG_MAP[model_name]['hidden_dim'] __lowerCAmelCase: List[str] = CONFIG_MAP[model_name]['width_coef'] __lowerCAmelCase: str = CONFIG_MAP[model_name]['depth_coef'] __lowerCAmelCase: List[Any] = CONFIG_MAP[model_name]['image_size'] __lowerCAmelCase: int = CONFIG_MAP[model_name]['dropout_rate'] __lowerCAmelCase: List[Any] = CONFIG_MAP[model_name]['dw_padding'] __lowerCAmelCase: List[str] = 'huggingface/label-files' __lowerCAmelCase: Tuple = 'imagenet-1k-id2label.json' __lowerCAmelCase: int = 10_00 __lowerCAmelCase: List[Any] = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) __lowerCAmelCase: Optional[int] = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} __lowerCAmelCase: List[str] = idalabel __lowerCAmelCase: str = {v: k for k, v in idalabel.items()} return config def _a ( ) -> Dict: """simple docstring""" __lowerCAmelCase: List[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg' __lowerCAmelCase: Any = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw ) return im def _a ( SCREAMING_SNAKE_CASE : Any ) -> List[str]: """simple docstring""" __lowerCAmelCase: int = CONFIG_MAP[model_name]['image_size'] __lowerCAmelCase: Any = EfficientNetImageProcessor( size={'height': size, 'width': size} , image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 0.4_7_4_3_4_1_6_3] , do_center_crop=SCREAMING_SNAKE_CASE , ) return preprocessor def _a ( 
original_param_names):
    """Build the mapping from original TF parameter names to HF parameter names."""
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
    rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight"))
    rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight"))
    rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias"))
    rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean"))
    rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var"))

    for b in block_names:
        hf_b = block_name_mapping[b]
        rename_keys.append((f"block{b}_expand_conv/kernel:0", f"encoder.blocks.{hf_b}.expansion.expand_conv.weight"))
        rename_keys.append((f"block{b}_expand_bn/gamma:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.weight"))
        rename_keys.append((f"block{b}_expand_bn/beta:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.bias"))
        rename_keys.append((f"block{b}_expand_bn/moving_mean:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean"))
        rename_keys.append((f"block{b}_expand_bn/moving_variance:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_var"))
        rename_keys.append((f"block{b}_dwconv/depthwise_kernel:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight"))
        rename_keys.append((f"block{b}_bn/gamma:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight"))
        rename_keys.append((f"block{b}_bn/beta:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias"))
        rename_keys.append((f"block{b}_bn/moving_mean:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean"))
        rename_keys.append((f"block{b}_bn/moving_variance:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var"))
        rename_keys.append((f"block{b}_se_reduce/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight"))
        rename_keys.append((f"block{b}_se_reduce/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias"))
        rename_keys.append((f"block{b}_se_expand/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.weight"))
        rename_keys.append((f"block{b}_se_expand/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.bias"))
        rename_keys.append((f"block{b}_project_conv/kernel:0", f"encoder.blocks.{hf_b}.projection.project_conv.weight"))
        rename_keys.append((f"block{b}_project_bn/gamma:0", f"encoder.blocks.{hf_b}.projection.project_bn.weight"))
        rename_keys.append((f"block{b}_project_bn/beta:0", f"encoder.blocks.{hf_b}.projection.project_bn.bias"))
        rename_keys.append((f"block{b}_project_bn/moving_mean:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_mean"))
        rename_keys.append((f"block{b}_project_bn/moving_variance:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_var"))

    rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight"))
    rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight"))
    rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias"))
    rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean"))
    rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var"))

    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    # Classifier head (the top TF layer is named "predictions")
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping


def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)


@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """Copy/paste/tweak the original EfficientNet weights into the HF structure."""
    # Load the original TF model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="b0",
        type=str,
        help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="hf_model",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--save_model", action="store_true", help="Save model to local")
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")

    args = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
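# Illustrative follow-up (not part of the original script): after running the
# converter with its defaults, the checkpoint lands in "hf_model" and can be
# reloaded like any other Hugging Face checkpoint. The folder name and the
# EfficientNetImageProcessor pairing are assumptions tied to the defaults above.
#
#   from transformers import EfficientNetForImageClassification, EfficientNetImageProcessor
#
#   processor = EfficientNetImageProcessor.from_pretrained("hf_model")
#   model = EfficientNetForImageClassification.from_pretrained("hf_model")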
322
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
        OpenAIGPTConfig,
        OpenAIGPTDoubleHeadsModel,
        OpenAIGPTForSequenceClassification,
        OpenAIGPTLMHeadModel,
        OpenAIGPTModel,
    )


class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict


@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
277
0
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int):
    # Use Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])

    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {single_qubit_measure(1, 1)}")
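# Interpretation note (not part of the original module): the circuit applies no
# gates before measuring, so the qubit stays in |0> and the histogram is
# deterministic, e.g. single_qubit_measure(1, 1) -> {'0': 1000}.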
278
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
278
1
import warnings
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
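# Illustrative usage of the classes above (a sketch, not part of the library
# module; the default SegformerConfig() is used only to show the ONNX metadata):
#
#   onnx_config = SegformerOnnxConfig(SegformerConfig())
#   onnx_config.inputs                # OrderedDict with dynamic pixel_values axes
#   onnx_config.atol_for_validation   # 1e-4
#   onnx_config.default_onnx_opset    # 12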
278
import io
import json

import fsspec
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]


class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()

        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
278
1
import torch

from diffusers import StableDiffusionPipeline


model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
278
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union

import torch

from ..utils import BaseOutput


SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class KarrasDiffusionSchedulers(Enum):
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    EulerAncestralDiscreteScheduler = 6
    DPMSolverMultistepScheduler = 7
    HeunDiscreteScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14


@dataclass
class SchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor


class SchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
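# Illustrative round trip through the mixin API above (a sketch; the checkpoint
# id is only an example, and any concrete scheduler subclass behaves the same):
#
#   from diffusers import DDPMScheduler
#
#   scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256", subfolder="scheduler")
#   scheduler.save_pretrained("./my-scheduler")  # writes scheduler_config.json
#   print(scheduler.compatibles)                 # classes sharing this config format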
278
1
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
278
import unittest

import numpy as np
import torch

from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_karras_ve_pipeline(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
278
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
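# Illustrative usage (a sketch; requires `timm` to be installed, and the
# backbone name is just an example):
#
#   from transformers import TimmBackbone
#
#   config = TimmBackboneConfig(backbone="resnet50", use_pretrained_backbone=False, out_indices=(1, 2, 3, 4))
#   model = TimmBackbone(config)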
278
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
278
1
import itertools
import math


def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Returns the nth prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
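# The generator composes naturally with itertools; for example, the first ten
# primes (a quick sanity check, not part of the original solution):
#
#   list(itertools.islice(prime_generator(), 10))
#   # -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]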
278
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits: int = 3):
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(
        f"Total count for quantum fourier transform state is: \
    {quantum_fourier_transform(3)}"
    )
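# Interpretation note (not part of the original module): applied to the all-zero
# input state, the QFT yields a uniform superposition, so the 10000 shots should
# spread roughly evenly over all 2**n basis states (about 1250 counts per state
# for the default n = 3).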
278
1
import argparse
import logging
import os
import re

import tensorflow as tf

from transformers import (
    AutoConfig,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    PushToHubCallback,
    TFAutoModelForMaskedLM,
    create_optimizer,
)


logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE


def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config",
        type=str,
        default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size",
        type=int,
        default=8,
        help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu",
        action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name",
        type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
        default="local",
    )
    parser.add_argument(
        "--tpu_zone",
        type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16",
        action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset",
        type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size",
        type=int,
        default=2**18,
        help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset",
        type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate",
        type=float,
        default=1e-3,
        help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability",
        type=float,
        default=0.15,
        help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")

    args = parser.parse_args()
    return args


def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu


def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples


def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(args.shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset


def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)


if __name__ == "__main__":
    args = parse_args()
    main(args)
278
from functools import lru_cache


@lru_cache
def factorial(num: int) -> int:
    if num < 0:
        raise ValueError("Number should not be negative.")

    return 1 if num in (0, 1) else num * factorial(num - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
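# Because of @lru_cache, repeated calls are memoized; a quick illustration
# (not part of the original module):
#
#   factorial(10)            # 3628800, computed recursively once
#   factorial(10)            # served from the cache
#   factorial.cache_info()   # e.g. CacheInfo(hits=1, misses=11, maxsize=128, currsize=11)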
278
1
import os

import torch

from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version


if is_torch_version(">=", FSDP_PYTORCH_VERSION):
    import torch.distributed.checkpoint as dist_cp
    from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
    from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
    from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
    from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType


logger = get_logger(__name__)


def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")


def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)


def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")


def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly opytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
278
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def __UpperCamelCase ( _A ): lowerCAmelCase_ = 384 lowerCAmelCase_ = 7 if "tiny" in model_name: lowerCAmelCase_ = 96 lowerCAmelCase_ = (2, 2, 6, 2) lowerCAmelCase_ = (3, 6, 12, 24) elif "small" in model_name: lowerCAmelCase_ = 96 lowerCAmelCase_ = (2, 2, 18, 2) lowerCAmelCase_ = (3, 6, 12, 24) elif "base" in model_name: lowerCAmelCase_ = 128 lowerCAmelCase_ = (2, 2, 18, 2) lowerCAmelCase_ = (4, 8, 16, 32) lowerCAmelCase_ = 12 lowerCAmelCase_ = 512 elif "large" in model_name: lowerCAmelCase_ = 192 lowerCAmelCase_ = (2, 2, 18, 2) lowerCAmelCase_ = (6, 12, 24, 48) lowerCAmelCase_ = 12 lowerCAmelCase_ = 768 # set label information lowerCAmelCase_ = 150 lowerCAmelCase_ = '''huggingface/label-files''' lowerCAmelCase_ = '''ade20k-id2label.json''' lowerCAmelCase_ = json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) ) lowerCAmelCase_ = {int(_A ): v for k, v in idalabel.items()} lowerCAmelCase_ = {v: k for k, v in idalabel.items()} lowerCAmelCase_ = SwinConfig( embed_dim=_A , depths=_A , num_heads=_A , window_size=_A , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , ) lowerCAmelCase_ = UperNetConfig( backbone_config=_A , auxiliary_in_channels=_A , num_labels=_A , idalabel=_A , labelaid=_A , ) return config def __UpperCamelCase ( _A ): lowerCAmelCase_ = [] # fmt: off # stem rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight''') ) rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias''') ) rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight''') ) rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias''') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") ) 
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias") ) if i < 3: rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight") ) rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight") ) rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias") ) rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight") ) rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias") ) # decode head rename_keys.extend( [ ('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''), ('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''), ('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''), ('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''), ] ) # fmt: on return rename_keys def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = dct.pop(_A ) lowerCAmelCase_ = val def __UpperCamelCase ( _A , _A ): lowerCAmelCase_ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): lowerCAmelCase_ = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) lowerCAmelCase_ = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight" ) lowerCAmelCase_ = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict lowerCAmelCase_ = in_proj_weight[:dim, :] lowerCAmelCase_ = in_proj_bias[: dim] lowerCAmelCase_ = in_proj_weight[ dim : dim * 2, : ] lowerCAmelCase_ = in_proj_bias[ dim : dim * 2 ] lowerCAmelCase_ = in_proj_weight[ -dim :, : ] lowerCAmelCase_ = in_proj_bias[-dim :] # fmt: on def __UpperCamelCase ( _A ): lowerCAmelCase_ , lowerCAmelCase_ = x.shape lowerCAmelCase_ = x.reshape(_A , 4 , in_channel // 4 ) lowerCAmelCase_ = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(_A , _A ) return x def __UpperCamelCase ( _A ): lowerCAmelCase_ , lowerCAmelCase_ = x.shape lowerCAmelCase_ = x.reshape(_A , in_channel // 4 , 4 ) lowerCAmelCase_ = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(_A , _A ) return x def __UpperCamelCase ( _A ): lowerCAmelCase_ = x.shape[0] lowerCAmelCase_ = x.reshape(4 , in_channel // 4 ) lowerCAmelCase_ = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(_A ) return x def __UpperCamelCase ( _A ): lowerCAmelCase_ = x.shape[0] lowerCAmelCase_ = x.reshape(in_channel // 4 , 4 ) lowerCAmelCase_ = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(_A ) return x def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = { '''upernet-swin-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth''', '''upernet-swin-small''': 
'''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth''', '''upernet-swin-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth''', '''upernet-swin-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth''', } lowerCAmelCase_ = model_name_to_url[model_name] lowerCAmelCase_ = torch.hub.load_state_dict_from_url(_A , map_location='''cpu''' , file_name=_A )[ '''state_dict''' ] for name, param in state_dict.items(): print(_A , param.shape ) lowerCAmelCase_ = get_upernet_config(_A ) lowerCAmelCase_ = UperNetForSemanticSegmentation(_A ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): lowerCAmelCase_ = state_dict.pop(_A ) if "bn" in key: lowerCAmelCase_ = key.replace('''bn''' , '''batch_norm''' ) lowerCAmelCase_ = val # rename keys lowerCAmelCase_ = create_rename_keys(_A ) for src, dest in rename_keys: rename_key(_A , _A , _A ) read_in_q_k_v(_A , config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: lowerCAmelCase_ = reverse_correct_unfold_reduction_order(_A ) if "norm" in key: lowerCAmelCase_ = reverse_correct_unfold_norm_order(_A ) model.load_state_dict(_A ) # verify on image lowerCAmelCase_ = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg''' lowerCAmelCase_ = Image.open(requests.get(_A , stream=_A ).raw ).convert('''RGB''' ) lowerCAmelCase_ = SegformerImageProcessor() lowerCAmelCase_ = processor(_A , return_tensors='''pt''' ).pixel_values with torch.no_grad(): lowerCAmelCase_ = model(_A ) lowerCAmelCase_ = outputs.logits print(logits.shape ) print('''First values of logits:''' , logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": lowerCAmelCase_ = torch.tensor( [[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] ) elif model_name == "upernet-swin-small": lowerCAmelCase_ = torch.tensor( [[-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.0_9_0_8, -7.0_9_0_8, -6.8_5_3_4]] ) elif model_name == "upernet-swin-base": lowerCAmelCase_ = torch.tensor( [[-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.4_7_6_3, -6.4_7_6_3, -6.3_2_5_4]] ) elif model_name == "upernet-swin-large": lowerCAmelCase_ = torch.tensor( [[-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.4_0_4_4, -7.4_0_4_4, -7.2_5_8_6]] ) print('''Logits:''' , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , _A , atol=1E-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(f"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_A ) print(f"Saving processor to {pytorch_dump_folder_path}" ) processor.save_pretrained(_A ) if push_to_hub: print(f"Pushing model and processor for {model_name} to hub" ) model.push_to_hub(f"openmmlab/{model_name}" ) 
processor.push_to_hub(f"openmmlab/{model_name}" ) if __name__ == "__main__": _A = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''upernet-swin-tiny''', type=str, choices=[f"upernet-swin-{size}" for size in ['''tiny''', '''small''', '''base''', '''large''']], help='''Name of the Swin + UperNet model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) _A = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
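# Minimal usage sketch for a checkpoint converted by the script above. It
# assumes the "openmmlab/upernet-swin-tiny" repo pushed above exists on the Hub;
# the logits shape comment follows the 150 ADE20k labels set in the config.
import requests
import torch
from PIL import Image

from transformers import SegformerImageProcessor, UperNetForSemanticSegmentation

url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

processor = SegformerImageProcessor()
model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny")

pixel_values = processor(image, return_tensors="pt").pixel_values
with torch.no_grad():
    logits = model(pixel_values).logits  # (1, 150, H, W): one channel per ADE20k class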
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]


if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
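# Separate, self-contained sketch of the lazy-import idea behind _LazyModule,
# using PEP 562 module-level __getattr__. This is illustrative only and is
# *not* the actual transformers.utils._LazyModule implementation.
import importlib

_import_structure = {"configuration_falcon": ["FalconConfig"]}
_name_to_module = {
    name: module for module, names in _import_structure.items() for name in names
}


def __getattr__(name):
    # Import the submodule only when one of its names is first accessed.
    if name in _name_to_module:
        submodule = importlib.import_module(f".{_name_to_module[name]}", __package__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")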
import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def __UpperCamelCase ( _A , _A ): lowerCAmelCase_ = args.log_outputs lowerCAmelCase_ = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] ) # load metric lowerCAmelCase_ = load_metric('''wer''' ) lowerCAmelCase_ = load_metric('''cer''' ) # compute metrics lowerCAmelCase_ = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] ) lowerCAmelCase_ = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] ) # print & log results lowerCAmelCase_ = f"WER: {wer_result}\nCER: {cer_result}" print(_A ) with open(f"{dataset_id}_eval_results.txt" , '''w''' ) as f: f.write(_A ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: lowerCAmelCase_ = f"log_{dataset_id}_predictions.txt" lowerCAmelCase_ = f"log_{dataset_id}_targets.txt" with open(_A , '''w''' ) as p, open(_A , '''w''' ) as t: # mapping function to write output def write_to_file(_A , _A ): p.write(f"{i}" + '''\n''' ) p.write(batch['''prediction'''] + '''\n''' ) t.write(f"{i}" + '''\n''' ) t.write(batch['''target'''] + '''\n''' ) result.map(_A , with_indices=_A ) def __UpperCamelCase ( _A ): lowerCAmelCase_ = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training lowerCAmelCase_ = re.sub(_A , '''''' , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! lowerCAmelCase_ = ['''\n\n''', '''\n''', ''' ''', ''' '''] for t in token_sequences_to_ignore: lowerCAmelCase_ = ''' '''.join(text.split(_A ) ) return text def __UpperCamelCase ( _A ): # load dataset lowerCAmelCase_ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=_A ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained(args.model_id ) lowerCAmelCase_ = feature_extractor.sampling_rate # resample audio lowerCAmelCase_ = dataset.cast_column('''audio''' , Audio(sampling_rate=_A ) ) # load eval pipeline if args.device is None: lowerCAmelCase_ = 0 if torch.cuda.is_available() else -1 lowerCAmelCase_ = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(_A ): lowerCAmelCase_ = asr( batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) lowerCAmelCase_ = prediction['''text'''] lowerCAmelCase_ = normalize_text(batch['''sentence'''] ) return batch # run inference on all examples lowerCAmelCase_ = dataset.map(_A , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(_A , _A ) if __name__ == "__main__": _A = argparse.ArgumentParser() parser.add_argument( '''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers''' ) parser.add_argument( '''--dataset''', type=str, required=True, help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''', ) parser.add_argument( '''--config''', type=str, required=True, help='''Config of the dataset. 
*E.g.* `\'en\'` for Common Voice''' ) parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''') parser.add_argument( '''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.''' ) parser.add_argument( '''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.''' ) parser.add_argument( '''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.''' ) parser.add_argument( '''--device''', type=int, default=None, help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''', ) _A = parser.parse_args() main(args)
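# Standalone sketch of the text normalization used by the eval script above.
# The regex is copied from the script; the multi-space entries in the token
# list are an assumption (whitespace was lost in flattening), and the sample
# sentence is invented.
import re

chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605


def normalize_text(text: str) -> str:
    text = re.sub(chars_to_ignore_regex, "", text.lower())
    for token in ["\n\n", "\n", "   ", "  "]:  # note: order matters
        text = " ".join(text.split(token))
    return text


print(normalize_text("Hello,\nWORLD — how are you?"))  # -> 'hello world how are you'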
import json import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from transformers import OneFormerImageProcessor from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput if is_vision_available(): from PIL import Image def __UpperCamelCase ( _A , _A="shi-labs/oneformer_demo" ): with open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) as f: lowerCAmelCase_ = json.load(_A ) lowerCAmelCase_ = {} lowerCAmelCase_ = [] lowerCAmelCase_ = [] for key, info in class_info.items(): lowerCAmelCase_ = info['''name'''] class_names.append(info['''name'''] ) if info["isthing"]: thing_ids.append(int(_A ) ) lowerCAmelCase_ = thing_ids lowerCAmelCase_ = class_names return metadata class A ( unittest.TestCase ): def __init__( self, UpperCamelCase__, UpperCamelCase__=7, UpperCamelCase__=3, UpperCamelCase__=30, UpperCamelCase__=400, UpperCamelCase__=None, UpperCamelCase__=True, UpperCamelCase__=True, UpperCamelCase__=[0.5, 0.5, 0.5], UpperCamelCase__=[0.5, 0.5, 0.5], UpperCamelCase__=10, UpperCamelCase__=False, UpperCamelCase__=255, UpperCamelCase__="shi-labs/oneformer_demo", UpperCamelCase__="ade20k_panoptic.json", UpperCamelCase__=10, ): """simple docstring""" lowerCAmelCase_ = parent lowerCAmelCase_ = batch_size lowerCAmelCase_ = num_channels lowerCAmelCase_ = min_resolution lowerCAmelCase_ = max_resolution lowerCAmelCase_ = do_resize lowerCAmelCase_ = {'''shortest_edge''': 32, '''longest_edge''': 1333} if size is None else size lowerCAmelCase_ = do_normalize lowerCAmelCase_ = image_mean lowerCAmelCase_ = image_std lowerCAmelCase_ = class_info_file lowerCAmelCase_ = prepare_metadata(UpperCamelCase__, UpperCamelCase__ ) lowerCAmelCase_ = num_text lowerCAmelCase_ = repo_path # for the post_process_functions lowerCAmelCase_ = 2 lowerCAmelCase_ = 10 lowerCAmelCase_ = 10 lowerCAmelCase_ = 3 lowerCAmelCase_ = 4 lowerCAmelCase_ = num_labels lowerCAmelCase_ = do_reduce_labels lowerCAmelCase_ = ignore_index def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "num_labels": self.num_labels, "do_reduce_labels": self.do_reduce_labels, "ignore_index": self.ignore_index, "class_info_file": self.class_info_file, "metadata": self.metadata, "num_text": self.num_text, } def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__=False ): """simple docstring""" if not batched: lowerCAmelCase_ = image_inputs[0] if isinstance(UpperCamelCase__, Image.Image ): lowerCAmelCase_ , lowerCAmelCase_ = image.size else: lowerCAmelCase_ , lowerCAmelCase_ = image.shape[1], image.shape[2] if w < h: lowerCAmelCase_ = int(self.size['''shortest_edge'''] * h / w ) lowerCAmelCase_ = self.size['''shortest_edge'''] elif w > h: lowerCAmelCase_ = self.size['''shortest_edge'''] lowerCAmelCase_ = int(self.size['''shortest_edge'''] * w / h ) else: lowerCAmelCase_ = self.size['''shortest_edge'''] lowerCAmelCase_ = self.size['''shortest_edge'''] else: lowerCAmelCase_ = [] for image in image_inputs: lowerCAmelCase_ , 
lowerCAmelCase_ = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) lowerCAmelCase_ = max(UpperCamelCase__, key=lambda UpperCamelCase__ : item[0] )[0] lowerCAmelCase_ = max(UpperCamelCase__, key=lambda UpperCamelCase__ : item[1] )[1] return expected_height, expected_width def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" return OneFormerForUniversalSegmentationOutput( # +1 for null class class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ), masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ), ) @require_torch @require_vision class A ( __UpperCAmelCase , unittest.TestCase ): __snake_case = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None # only for test_image_processing_common.test_image_proc_to_json_string __snake_case = image_processing_class def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = OneFormerImageProcessorTester(self ) @property def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" return self.image_processing_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase__, '''image_mean''' ) ) self.assertTrue(hasattr(UpperCamelCase__, '''image_std''' ) ) self.assertTrue(hasattr(UpperCamelCase__, '''do_normalize''' ) ) self.assertTrue(hasattr(UpperCamelCase__, '''do_resize''' ) ) self.assertTrue(hasattr(UpperCamelCase__, '''size''' ) ) self.assertTrue(hasattr(UpperCamelCase__, '''ignore_index''' ) ) self.assertTrue(hasattr(UpperCamelCase__, '''class_info_file''' ) ) self.assertTrue(hasattr(UpperCamelCase__, '''num_text''' ) ) self.assertTrue(hasattr(UpperCamelCase__, '''repo_path''' ) ) self.assertTrue(hasattr(UpperCamelCase__, '''metadata''' ) ) self.assertTrue(hasattr(UpperCamelCase__, '''do_reduce_labels''' ) ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase_ = prepare_image_inputs(self.image_processing_tester, equal_resolution=UpperCamelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase__, Image.Image ) # Test not batched input lowerCAmelCase_ = image_processor(image_inputs[0], ['''semantic'''], return_tensors='''pt''' ).pixel_values lowerCAmelCase_ , lowerCAmelCase_ = self.image_processing_tester.get_expected_values(UpperCamelCase__ ) self.assertEqual( encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), ) # Test batched lowerCAmelCase_ , lowerCAmelCase_ = self.image_processing_tester.get_expected_values(UpperCamelCase__, batched=UpperCamelCase__ ) lowerCAmelCase_ = image_processor( UpperCamelCase__, ['''semantic'''] * len(UpperCamelCase__ ), return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ), ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase_ = prepare_image_inputs(self.image_processing_tester, equal_resolution=UpperCamelCase__, numpify=UpperCamelCase__ ) for image in image_inputs: 
self.assertIsInstance(UpperCamelCase__, np.ndarray ) # Test not batched input lowerCAmelCase_ = image_processor(image_inputs[0], ['''semantic'''], return_tensors='''pt''' ).pixel_values lowerCAmelCase_ , lowerCAmelCase_ = self.image_processing_tester.get_expected_values(UpperCamelCase__ ) self.assertEqual( encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), ) # Test batched lowerCAmelCase_ , lowerCAmelCase_ = self.image_processing_tester.get_expected_values(UpperCamelCase__, batched=UpperCamelCase__ ) lowerCAmelCase_ = image_processor( UpperCamelCase__, ['''semantic'''] * len(UpperCamelCase__ ), return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ), ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase_ = prepare_image_inputs(self.image_processing_tester, equal_resolution=UpperCamelCase__, torchify=UpperCamelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase__, torch.Tensor ) # Test not batched input lowerCAmelCase_ = image_processor(image_inputs[0], ['''semantic'''], return_tensors='''pt''' ).pixel_values lowerCAmelCase_ , lowerCAmelCase_ = self.image_processing_tester.get_expected_values(UpperCamelCase__ ) self.assertEqual( encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), ) # Test batched lowerCAmelCase_ , lowerCAmelCase_ = self.image_processing_tester.get_expected_values(UpperCamelCase__, batched=UpperCamelCase__ ) lowerCAmelCase_ = image_processor( UpperCamelCase__, ['''semantic'''] * len(UpperCamelCase__ ), return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ), ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__=False, UpperCamelCase__=False, UpperCamelCase__="np" ): """simple docstring""" lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict ) # prepare image and target lowerCAmelCase_ = self.image_processing_tester.num_labels lowerCAmelCase_ = None lowerCAmelCase_ = None lowerCAmelCase_ = prepare_image_inputs(self.image_processing_tester, equal_resolution=UpperCamelCase__ ) if with_segmentation_maps: lowerCAmelCase_ = num_labels if is_instance_map: lowerCAmelCase_ = list(range(UpperCamelCase__ ) ) * 2 lowerCAmelCase_ = dict(enumerate(UpperCamelCase__ ) ) lowerCAmelCase_ = [ np.random.randint(0, high * 2, (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs ] if segmentation_type == "pil": lowerCAmelCase_ = [Image.fromarray(UpperCamelCase__ ) for annotation in annotations] lowerCAmelCase_ = image_processor( UpperCamelCase__, ['''semantic'''] * len(UpperCamelCase__ ), UpperCamelCase__, return_tensors='''pt''', instance_id_to_semantic_id=UpperCamelCase__, pad_and_return_pixel_mask=UpperCamelCase__, ) return inputs def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" def common(UpperCamelCase__=False, UpperCamelCase__=None ): lowerCAmelCase_ = self.comm_get_image_processor_inputs( with_segmentation_maps=UpperCamelCase__, is_instance_map=UpperCamelCase__, segmentation_type=UpperCamelCase__ ) lowerCAmelCase_ = inputs['''mask_labels'''] 
lowerCAmelCase_ = inputs['''class_labels'''] lowerCAmelCase_ = inputs['''pixel_values'''] lowerCAmelCase_ = inputs['''text_inputs'''] # check the batch_size for mask_label, class_label, text_input in zip(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ): self.assertEqual(mask_label.shape[0], class_label.shape[0] ) # this ensure padding has happened self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:] ) self.assertEqual(len(UpperCamelCase__ ), self.image_processing_tester.num_text ) common() common(is_instance_map=UpperCamelCase__ ) common(is_instance_map=UpperCamelCase__, segmentation_type='''pil''' ) common(is_instance_map=UpperCamelCase__, segmentation_type='''pil''' ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = np.zeros((20, 50) ) lowerCAmelCase_ = 1 lowerCAmelCase_ = 1 lowerCAmelCase_ = 1 lowerCAmelCase_ = binary_mask_to_rle(UpperCamelCase__ ) self.assertEqual(len(UpperCamelCase__ ), 4 ) self.assertEqual(rle[0], 21 ) self.assertEqual(rle[1], 45 ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.image_processing_class( num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file='''ade20k_panoptic.json''', num_text=self.image_processing_tester.num_text, repo_path='''shi-labs/oneformer_demo''', ) lowerCAmelCase_ = self.image_processing_tester.get_fake_oneformer_outputs() lowerCAmelCase_ = fature_extractor.post_process_semantic_segmentation(UpperCamelCase__ ) self.assertEqual(len(UpperCamelCase__ ), self.image_processing_tester.batch_size ) self.assertEqual( segmentation[0].shape, ( self.image_processing_tester.height, self.image_processing_tester.width, ), ) lowerCAmelCase_ = [(1, 4) for i in range(self.image_processing_tester.batch_size )] lowerCAmelCase_ = fature_extractor.post_process_semantic_segmentation(UpperCamelCase__, target_sizes=UpperCamelCase__ ) self.assertEqual(segmentation[0].shape, target_sizes[0] ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.image_processing_class( num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file='''ade20k_panoptic.json''', num_text=self.image_processing_tester.num_text, repo_path='''shi-labs/oneformer_demo''', ) lowerCAmelCase_ = self.image_processing_tester.get_fake_oneformer_outputs() lowerCAmelCase_ = image_processor.post_process_instance_segmentation(UpperCamelCase__, threshold=0 ) self.assertTrue(len(UpperCamelCase__ ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue('''segmentation''' in el ) self.assertTrue('''segments_info''' in el ) self.assertEqual(type(el['''segments_info'''] ), UpperCamelCase__ ) self.assertEqual( el['''segmentation'''].shape, (self.image_processing_tester.height, self.image_processing_tester.width) ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.image_processing_class( num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file='''ade20k_panoptic.json''', num_text=self.image_processing_tester.num_text, repo_path='''shi-labs/oneformer_demo''', ) lowerCAmelCase_ = self.image_processing_tester.get_fake_oneformer_outputs() lowerCAmelCase_ = image_processor.post_process_panoptic_segmentation(UpperCamelCase__, threshold=0 ) self.assertTrue(len(UpperCamelCase__ ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue('''segmentation''' in el ) 
self.assertTrue('''segments_info''' in el ) self.assertEqual(type(el['''segments_info'''] ), UpperCamelCase__ ) self.assertEqual( el['''segmentation'''].shape, (self.image_processing_tester.height, self.image_processing_tester.width) )
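# Minimal sketch of the run-length encoding checked by the binary_mask_to_rle
# test above. It assumes the COCO convention that the encoding always starts
# with the count of leading zeros; the flattening order used by OneFormer is
# not shown here, so row-major is an assumption.
from itertools import groupby

import numpy as np


def binary_mask_to_rle_sketch(mask: np.ndarray) -> list:
    pixels = mask.flatten()
    runs = [len(list(group)) for _, group in groupby(pixels)]
    if pixels[0] == 1:
        runs = [0] + runs  # prepend an empty zero-run so runs alternate 0s/1s
    return runs


mask = np.zeros(10, dtype=np.uint8)
mask[3:6] = 1
print(binary_mask_to_rle_sketch(mask))  # [3, 3, 4]: three 0s, three 1s, four 0s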
from typing import Dict, List, Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _A = logging.get_logger(__name__) _A = { '''nielsr/canine-s''': 2_048, } # Unicode defines 1,114,112 total “codepoints” _A = 1_114_112 # Below: Constants defining canonical codepoints for special, pseudo-characters. # Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py _A = 0 _A = 0xe0_00 _A = 0xe0_01 _A = 0xe0_02 _A = 0xe0_03 _A = 0xe0_04 # Maps special codepoints to human-readable names. _A = { # Special symbols are represented using codepoints values that are valid, # but designated as "Private Use", meaning that they will never be assigned # characters by the Unicode Consortium, and are thus safe for use here. # # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly # excluded and should fail with a hard error. CLS: "[CLS]", SEP: "[SEP]", BOS: "[BOS]", MASK: "[MASK]", PAD: "[PAD]", RESERVED: "[RESERVED]", } # Maps special codepoint human-readable names to their codepoint values. _A = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()} class A ( __UpperCAmelCase ): __snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self, UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=False, UpperCamelCase__=2048, **UpperCamelCase__, ): """simple docstring""" lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else bos_token lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else eos_token lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else sep_token lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else cls_token lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else pad_token # Mask token behave like a normal word, i.e. include the space before it lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else mask_token super().__init__( bos_token=UpperCamelCase__, eos_token=UpperCamelCase__, sep_token=UpperCamelCase__, cls_token=UpperCamelCase__, pad_token=UpperCamelCase__, mask_token=UpperCamelCase__, add_prefix_space=UpperCamelCase__, model_max_length=UpperCamelCase__, **UpperCamelCase__, ) # Creates a mapping for looking up the IDs of special symbols. lowerCAmelCase_ = {} for codepoint, name in SPECIAL_CODEPOINTS.items(): lowerCAmelCase_ = codepoint # Creates a mapping for looking up the string forms of special symbol IDs. 
lowerCAmelCase_ = { codepoint: name for name, codepoint in self._special_codepoints.items() } lowerCAmelCase_ = UNICODE_VOCAB_SIZE lowerCAmelCase_ = len(self._special_codepoints ) @property def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" return self._unicode_vocab_size def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ): """simple docstring""" return list(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ): """simple docstring""" try: return ord(UpperCamelCase__ ) except TypeError: raise ValueError(f"invalid token: '{token}'" ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ): """simple docstring""" try: if index in SPECIAL_CODEPOINTS: return SPECIAL_CODEPOINTS[index] return chr(UpperCamelCase__ ) except TypeError: raise ValueError(f"invalid id: {index}" ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ): """simple docstring""" return "".join(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None ): """simple docstring""" lowerCAmelCase_ = [self.sep_token_id] lowerCAmelCase_ = [self.cls_token_id] lowerCAmelCase_ = cls + token_ids_a + sep if token_ids_a is not None: result += token_ids_a + sep return result def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None, UpperCamelCase__ = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase__, token_ids_a=UpperCamelCase__, already_has_special_tokens=UpperCamelCase__ ) lowerCAmelCase_ = [1] + ([0] * len(UpperCamelCase__ )) + [1] if token_ids_a is not None: result += ([0] * len(UpperCamelCase__ )) + [1] return result def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None ): """simple docstring""" lowerCAmelCase_ = [self.sep_token_id] lowerCAmelCase_ = [self.cls_token_id] lowerCAmelCase_ = len(cls + token_ids_a + sep ) * [0] if token_ids_a is not None: result += len(token_ids_a + sep ) * [1] return result def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None ): """simple docstring""" return ()
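# Quick round-trip sketch of CANINE-style codepoint "tokenization": the
# vocabulary is just Unicode, so tokenize/convert reduce to list, ord and chr
# (exactly what the tokenizer methods above do).
text = "héllo"
tokens = list(text)                     # ['h', 'é', 'l', 'l', 'o']
ids = [ord(token) for token in tokens]  # [104, 233, 108, 108, 111]
restored = "".join(chr(i) for i in ids)
assert restored == text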
from math import asin, atan, cos, radians, sin, sqrt, tan

AXIS_A = 6378137.0       # WGS-84 semi-major axis, in meters
AXIS_B = 6356752.314245  # WGS-84 semi-minor axis, in meters
RADIUS = 6378137         # radius used by the haversine formula, in meters


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Return the great-circle distance between two points, in meters.

    Latitudes are first reduced to account for the Earth's flattening.
    """
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)

    # Haversine equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda

    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
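# Usage sketch reusing haversine_distance from above. The coordinates are
# approximate, and the printed value should come out on the order of 254 km.
SAN_FRANCISCO = (37.774856, -122.424227)
YOSEMITE = (37.864742, -119.537521)
print(f"{haversine_distance(*SAN_FRANCISCO, *YOSEMITE):0,.0f} meters")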
def solution(n: int = 1000000) -> int:
    """Return the starting number below ``n`` that produces the longest
    Collatz chain (Project Euler problem 14), memoizing chain lengths."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for start in range(2, n):
        counter = 0
        number = start
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if start not in counters:
            counters[start] = counter
        if counter > pre_counter:
            largest_number = start
            pre_counter = counter

    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
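# Sanity-check sketch: the Collatz sequence starting at 27 is the classic
# long example, taking 111 steps to reach 1. (The well-known answer to the
# full problem for n = 1,000,000 is 837799.)
def collatz_steps(number: int) -> int:
    steps = 0
    while number != 1:
        number = number // 2 if number % 2 == 0 else 3 * number + 1
        steps += 1
    return steps


assert collatz_steps(27) == 111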
from ..utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_scipy_available,
    is_torch_available,
    is_torchsde_available,
)


try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_pt_objects import *  # noqa F403
else:
    from .scheduling_consistency_models import CMStochasticIterativeScheduler
    from .scheduling_ddim import DDIMScheduler
    from .scheduling_ddim_inverse import DDIMInverseScheduler
    from .scheduling_ddim_parallel import DDIMParallelScheduler
    from .scheduling_ddpm import DDPMScheduler
    from .scheduling_ddpm_parallel import DDPMParallelScheduler
    from .scheduling_deis_multistep import DEISMultistepScheduler
    from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
    from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
    from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
    from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
    from .scheduling_euler_discrete import EulerDiscreteScheduler
    from .scheduling_heun_discrete import HeunDiscreteScheduler
    from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
    from .scheduling_karras_ve import KarrasVeScheduler
    from .scheduling_pndm import PNDMScheduler
    from .scheduling_repaint import RePaintScheduler
    from .scheduling_sde_ve import ScoreSdeVeScheduler
    from .scheduling_sde_vp import ScoreSdeVpScheduler
    from .scheduling_unclip import UnCLIPScheduler
    from .scheduling_unipc_multistep import UniPCMultistepScheduler
    from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
    from .scheduling_vq_diffusion import VQDiffusionScheduler


try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_flax_objects import *  # noqa F403
else:
    from .scheduling_ddim_flax import FlaxDDIMScheduler
    from .scheduling_ddpm_flax import FlaxDDPMScheduler
    from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
    from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
    from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
    from .scheduling_pndm_flax import FlaxPNDMScheduler
    from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
    from .scheduling_utils_flax import (
        FlaxKarrasDiffusionSchedulers,
        FlaxSchedulerMixin,
        FlaxSchedulerOutput,
        broadcast_to_shape_from_left,
    )


try:
    if not (is_torch_available() and is_scipy_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_scipy_objects import *  # noqa F403
else:
    from .scheduling_lms_discrete import LMSDiscreteScheduler

try:
    if not (is_torch_available() and is_torchsde_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_torchsde_objects import *  # noqa F403
else:
    from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
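# Self-contained sketch of the optional-dependency guard pattern used above.
# This is illustrative only: diffusers ships its own availability helpers and
# generated dummy objects, which this sketch replaces with prints.
import importlib.util


class OptionalDependencyNotAvailable(Exception):
    pass


def is_torch_available() -> bool:
    return importlib.util.find_spec("torch") is not None


try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    print("torch missing: fall back to dummy objects")
else:
    print("torch present: real schedulers can be imported")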
import unittest

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device


if is_torch_available():
    import torch

    from transformers import AutoModelForImageClassification

if is_vision_available():
    from transformers import AutoImageProcessor


@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
import argparse

import torch

from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
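# Sketch: after running the conversion, the dump folder is a normal
# from_pretrained target. "path/to/dump" is a placeholder for the
# --pytorch_dump_folder_path used above.
from transformers import GPT2Model

model = GPT2Model.from_pretrained("path/to/dump")
print(model.config)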
import argparse import os import re import torch from flax.traverse_util import flatten_dict from tax import checkpoints from transformers import ( AutoTokenizer, PixaStructConfig, PixaStructForConditionalGeneration, PixaStructImageProcessor, PixaStructProcessor, PixaStructTextConfig, PixaStructVisionConfig, ) def __UpperCamelCase ( _A ): lowerCAmelCase_ = checkpoints.load_tax_checkpoint(_A ) lowerCAmelCase_ = flatten_dict(_A ) return flax_params def __UpperCamelCase ( _A ): lowerCAmelCase_ = {} lowerCAmelCase_ = { '''token_embedder''': '''embeddings''', '''encoder_norm''': '''layernorm''', '''kernel''': '''weight''', '''.out''': '''.output''', '''scale''': '''weight''', '''embedders_0.pos_embedding''': '''row_embedder.weight''', '''embedders_1.pos_embedding''': '''column_embedder.weight''', } lowerCAmelCase_ = { '''query''': '''attention.query''', '''key''': '''attention.key''', '''value''': '''attention.value''', '''output.dense''': '''output''', '''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''', '''pre_self_attention_layer_norm''': '''self_attention.layer_norm''', '''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''', '''mlp.''': '''mlp.DenseReluDense.''', '''pre_mlp_layer_norm''': '''mlp.layer_norm''', '''self_attention.o''': '''self_attention.attention.o''', '''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''', '''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''', '''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''', '''decoder.logits_dense.weight''': '''decoder.lm_head.weight''', } for key in flax_dict.keys(): if "target" in key: # remove the first prefix from the key lowerCAmelCase_ = '''.'''.join(key[1:] ) # rename the key for old, new in CONVERSION_MAPPING.items(): lowerCAmelCase_ = new_key.replace(_A , _A ) if "decoder" in new_key: for old, new in DECODER_CONVERSION_MAPPING.items(): lowerCAmelCase_ = new_key.replace(_A , _A ) if "layers" in new_key and "decoder" not in new_key: # use regex to replace the layer number lowerCAmelCase_ = re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , _A ) lowerCAmelCase_ = new_key.replace('''encoder''' , '''encoder.encoder''' ) elif "layers" in new_key and "decoder" in new_key: # use regex to replace the layer number lowerCAmelCase_ = re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , _A ) lowerCAmelCase_ = flax_dict[key] lowerCAmelCase_ = {} # convert converted_dict into torch format for key in converted_dict.keys(): if ("embed_tokens" not in key) and ("embedder" not in key): lowerCAmelCase_ = torch.from_numpy(converted_dict[key].T ) else: lowerCAmelCase_ = torch.from_numpy(converted_dict[key] ) return converted_torch_dict def __UpperCamelCase ( _A , _A , _A=False , _A=False ): lowerCAmelCase_ = get_flax_param(_A ) if not use_large: lowerCAmelCase_ = PixaStructVisionConfig() lowerCAmelCase_ = PixaStructTextConfig() else: lowerCAmelCase_ = PixaStructVisionConfig( hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 ) lowerCAmelCase_ = PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 ) lowerCAmelCase_ = PixaStructConfig( vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=_A ) lowerCAmelCase_ = PixaStructForConditionalGeneration(_A ) lowerCAmelCase_ = rename_and_convert_flax_params(_A ) model.load_state_dict(_A ) lowerCAmelCase_ = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' ) 
lowerCAmelCase_ = PixaStructImageProcessor() lowerCAmelCase_ = PixaStructProcessor(image_processor=_A , tokenizer=_A ) if use_large: lowerCAmelCase_ = 4096 lowerCAmelCase_ = True # mkdir if needed os.makedirs(_A , exist_ok=_A ) model.save_pretrained(_A ) processor.save_pretrained(_A ) print('''Model saved in {}'''.format(_A ) ) if __name__ == "__main__": _A = argparse.ArgumentParser() parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''') parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''') parser.add_argument('''--is_vqa''', action='''store_true''', help='''Use large model.''') _A = parser.parse_args() convert_pixastruct_original_pytorch_checkpoint_to_hf( args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large )
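# Sketch: reload a converted checkpoint with the standard transformers class
# names. "path/to/dump" is a placeholder for the --pytorch_dump_folder_path
# argument above.
from transformers import Pix2StructForConditionalGeneration, Pix2StructProcessor

model = Pix2StructForConditionalGeneration.from_pretrained("path/to/dump")
processor = Pix2StructProcessor.from_pretrained("path/to/dump")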
from math import ceil


def solution(n: int = 1001) -> int:
    """Return the sum of the numbers on the diagonals of an n x n number
    spiral (Project Euler problem 28)."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
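# Worked check from the problem statement, reusing solution() above: the
# diagonals of a 5x5 spiral (1, 3, 5, 7, 9, 13, 17, 21, 25) sum to 101.
assert solution(5) == 101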
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
values = {
    0: "0", 1: "1", 2: "2", 3: "3", 4: "4", 5: "5", 6: "6", 7: "7",
    8: "8", 9: "9", 10: "a", 11: "b", 12: "c", 13: "d", 14: "e", 15: "f",
}


def decimal_to_hexadecimal(decimal: float) -> str:
    """Convert a base-10 integer (possibly given as a whole float) to its
    hexadecimal string representation, e.g. 255 -> '0xff'."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal


if __name__ == "__main__":
    import doctest

    doctest.testmod()
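# Quick checks against the built-in hex(), reusing decimal_to_hexadecimal()
# from above, for a positive and a negative value.
assert decimal_to_hexadecimal(255) == "0xff" == hex(255)
assert decimal_to_hexadecimal(-256) == "-0x100" == hex(-256)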
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() _A = logging.get_logger(__name__) def __UpperCamelCase ( _A ): lowerCAmelCase_ = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: lowerCAmelCase_ = 192 lowerCAmelCase_ = 768 lowerCAmelCase_ = 12 lowerCAmelCase_ = 3 lowerCAmelCase_ = [800, 1333] lowerCAmelCase_ = False elif yolos_name == "yolos_s_dWr": lowerCAmelCase_ = 330 lowerCAmelCase_ = 14 lowerCAmelCase_ = 6 lowerCAmelCase_ = 1320 elif "yolos_s" in yolos_name: lowerCAmelCase_ = 384 lowerCAmelCase_ = 1536 lowerCAmelCase_ = 12 lowerCAmelCase_ = 6 elif "yolos_b" in yolos_name: lowerCAmelCase_ = [800, 1344] lowerCAmelCase_ = 91 lowerCAmelCase_ = '''huggingface/label-files''' lowerCAmelCase_ = '''coco-detection-id2label.json''' lowerCAmelCase_ = json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) ) lowerCAmelCase_ = {int(_A ): v for k, v in idalabel.items()} lowerCAmelCase_ = idalabel lowerCAmelCase_ = {v: k for k, v in idalabel.items()} return config def __UpperCamelCase ( _A , _A , _A = False ): for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCAmelCase_ = state_dict.pop(f"blocks.{i}.attn.qkv.weight" ) lowerCAmelCase_ = state_dict.pop(f"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict lowerCAmelCase_ = in_proj_weight[: config.hidden_size, :] lowerCAmelCase_ = in_proj_bias[: config.hidden_size] lowerCAmelCase_ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCAmelCase_ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCAmelCase_ = in_proj_weight[-config.hidden_size :, :] lowerCAmelCase_ = in_proj_bias[-config.hidden_size :] def __UpperCamelCase ( _A ): if "backbone" in name: lowerCAmelCase_ = name.replace('''backbone''' , '''vit''' ) if "cls_token" in name: lowerCAmelCase_ = name.replace('''cls_token''' , '''embeddings.cls_token''' ) if "det_token" in name: lowerCAmelCase_ = name.replace('''det_token''' , '''embeddings.detection_tokens''' ) if "mid_pos_embed" in name: lowerCAmelCase_ = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' ) if "pos_embed" in name: lowerCAmelCase_ = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' ) if "patch_embed.proj" in name: lowerCAmelCase_ = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "blocks" in name: lowerCAmelCase_ = name.replace('''blocks''' , '''encoder.layer''' ) if "attn.proj" in name: lowerCAmelCase_ = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: lowerCAmelCase_ = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: lowerCAmelCase_ = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: lowerCAmelCase_ = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: lowerCAmelCase_ = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: lowerCAmelCase_ = name.replace('''mlp.fc2''' , '''output.dense''' ) if "class_embed" in name: lowerCAmelCase_ = name.replace('''class_embed''' , '''class_labels_classifier''' ) if "bbox_embed" in name: lowerCAmelCase_ = name.replace('''bbox_embed''' , 
'''bbox_predictor''' ) if "vit.norm" in name: lowerCAmelCase_ = name.replace('''vit.norm''' , '''vit.layernorm''' ) return name def __UpperCamelCase ( _A , _A ): for key in orig_state_dict.copy().keys(): lowerCAmelCase_ = orig_state_dict.pop(_A ) if "qkv" in key: lowerCAmelCase_ = key.split('''.''' ) lowerCAmelCase_ = int(key_split[2] ) lowerCAmelCase_ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: lowerCAmelCase_ = val[:dim, :] lowerCAmelCase_ = val[ dim : dim * 2, : ] lowerCAmelCase_ = val[-dim:, :] else: lowerCAmelCase_ = val[:dim] lowerCAmelCase_ = val[dim : dim * 2] lowerCAmelCase_ = val[-dim:] else: lowerCAmelCase_ = val return orig_state_dict def __UpperCamelCase ( ): lowerCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCAmelCase_ = Image.open(requests.get(_A , stream=_A ).raw ) return im @torch.no_grad() def __UpperCamelCase ( _A , _A , _A , _A = False ): lowerCAmelCase_ = get_yolos_config(_A ) # load original state_dict lowerCAmelCase_ = torch.load(_A , map_location='''cpu''' )['''model'''] # load 🤗 model lowerCAmelCase_ = YolosForObjectDetection(_A ) model.eval() lowerCAmelCase_ = convert_state_dict(_A , _A ) model.load_state_dict(_A ) # Check outputs on an image, prepared by YolosImageProcessor lowerCAmelCase_ = 800 if yolos_name != '''yolos_ti''' else 512 lowerCAmelCase_ = YolosImageProcessor(format='''coco_detection''' , size=_A ) lowerCAmelCase_ = image_processor(images=prepare_img() , return_tensors='''pt''' ) lowerCAmelCase_ = model(**_A ) lowerCAmelCase_ , lowerCAmelCase_ = outputs.logits, outputs.pred_boxes lowerCAmelCase_ , lowerCAmelCase_ = None, None if yolos_name == "yolos_ti": lowerCAmelCase_ = torch.tensor( [[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] ) lowerCAmelCase_ = torch.tensor( [[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] ) elif yolos_name == "yolos_s_200_pre": lowerCAmelCase_ = torch.tensor( [[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] ) lowerCAmelCase_ = torch.tensor( [[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] ) elif yolos_name == "yolos_s_300_pre": lowerCAmelCase_ = torch.tensor( [[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] ) lowerCAmelCase_ = torch.tensor( [[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] ) elif yolos_name == "yolos_s_dWr": lowerCAmelCase_ = torch.tensor( [[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] ) lowerCAmelCase_ = torch.tensor( [[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] ) elif yolos_name == "yolos_base": lowerCAmelCase_ = torch.tensor( [[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] ) lowerCAmelCase_ = torch.tensor( [[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] ) else: raise ValueError(f"Unknown yolos_name: {yolos_name}" ) assert torch.allclose(logits[0, :3, :3] , _A , atol=1E-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , _A , 
atol=1E-4 ) Path(_A ).mkdir(exist_ok=_A ) print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_A ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(_A ) if push_to_hub: lowerCAmelCase_ = { '''yolos_ti''': '''yolos-tiny''', '''yolos_s_200_pre''': '''yolos-small''', '''yolos_s_300_pre''': '''yolos-small-300''', '''yolos_s_dWr''': '''yolos-small-dwr''', '''yolos_base''': '''yolos-base''', } print('''Pushing to the hub...''' ) lowerCAmelCase_ = model_mapping[yolos_name] image_processor.push_to_hub(_A , organization='''hustvl''' ) model.push_to_hub(_A , organization='''hustvl''' ) if __name__ == "__main__": _A = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--yolos_name''', default='''yolos_s_200_pre''', type=str, help=( '''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\',''' ''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.''' ), ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) _A = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
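# Sketch: run detection with one of the pushed checkpoints from the mapping
# above (assumes "hustvl/yolos-tiny" is available on the Hub).
import requests
import torch
from PIL import Image

from transformers import YolosForObjectDetection, YolosImageProcessor

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = YolosImageProcessor.from_pretrained("hustvl/yolos-tiny")
model = YolosForObjectDetection.from_pretrained("hustvl/yolos-tiny")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.logits.shape, outputs.pred_boxes.shape)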
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class A ( __UpperCAmelCase ): __snake_case = ['image_processor', 'tokenizer'] __snake_case = 'LayoutLMv2ImageProcessor' __snake_case = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast') def __init__( self, UpperCamelCase__=None, UpperCamelCase__=None, **UpperCamelCase__ ): """simple docstring""" if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''', UpperCamelCase__, ) lowerCAmelCase_ = kwargs.pop('''feature_extractor''' ) lowerCAmelCase_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(UpperCamelCase__, UpperCamelCase__ ) def __call__( self, UpperCamelCase__, UpperCamelCase__ = None, UpperCamelCase__ = None, UpperCamelCase__ = None, UpperCamelCase__ = None, UpperCamelCase__ = True, UpperCamelCase__ = False, UpperCamelCase__ = None, UpperCamelCase__ = None, UpperCamelCase__ = 0, UpperCamelCase__ = None, UpperCamelCase__ = None, UpperCamelCase__ = None, UpperCamelCase__ = False, UpperCamelCase__ = False, UpperCamelCase__ = False, UpperCamelCase__ = False, UpperCamelCase__ = True, UpperCamelCase__ = None, **UpperCamelCase__, ): """simple docstring""" if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( '''You cannot provide bounding boxes ''' '''if you initialized the image processor with apply_ocr set to True.''' ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( '''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' ) if return_overflowing_tokens is True and return_offsets_mapping is False: raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' ) # first, apply the image processor lowerCAmelCase_ = self.image_processor(images=UpperCamelCase__, return_tensors=UpperCamelCase__ ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(UpperCamelCase__, UpperCamelCase__ ): lowerCAmelCase_ = [text] # add batch dimension (as the image processor always adds a batch dimension) lowerCAmelCase_ = features['''words'''] lowerCAmelCase_ = self.tokenizer( text=text if text is not None else features['''words'''], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features['''boxes'''], word_labels=UpperCamelCase__, add_special_tokens=UpperCamelCase__, padding=UpperCamelCase__, truncation=UpperCamelCase__, max_length=UpperCamelCase__, stride=UpperCamelCase__, pad_to_multiple_of=UpperCamelCase__, return_token_type_ids=UpperCamelCase__, return_attention_mask=UpperCamelCase__, return_overflowing_tokens=UpperCamelCase__, return_special_tokens_mask=UpperCamelCase__, return_offsets_mapping=UpperCamelCase__, return_length=UpperCamelCase__, verbose=UpperCamelCase__, return_tensors=UpperCamelCase__, **UpperCamelCase__, ) # add pixel values lowerCAmelCase_ = features.pop('''pixel_values''' ) if return_overflowing_tokens is True: lowerCAmelCase_ = 
self.get_overflowing_images(UpperCamelCase__, encoded_inputs['''overflow_to_sample_mapping'''] ) lowerCAmelCase_ = images return encoded_inputs def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ ): """simple docstring""" lowerCAmelCase_ = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(UpperCamelCase__ ) != len(UpperCamelCase__ ): raise ValueError( '''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got''' f" {len(UpperCamelCase__ )} and {len(UpperCamelCase__ )}" ) return images_with_overflow def SCREAMING_SNAKE_CASE__ ( self, *UpperCamelCase__, **UpperCamelCase__ ): """simple docstring""" return self.tokenizer.batch_decode(*UpperCamelCase__, **UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self, *UpperCamelCase__, **UpperCamelCase__ ): """simple docstring""" return self.tokenizer.decode(*UpperCamelCase__, **UpperCamelCase__ ) @property def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" return ["input_ids", "bbox", "attention_mask", "image"] @property def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', UpperCamelCase__, ) return self.image_processor_class @property def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', UpperCamelCase__, ) return self.image_processor
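# Hedged usage sketch for the processor above: the blank image is synthetic,
# and the default apply_ocr=True path needs Tesseract installed (a blank page
# may simply yield empty words/boxes).
from PIL import Image

from transformers import LayoutXLMProcessor

processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
image = Image.new("RGB", (224, 224), color="white")
encoding = processor(image, return_tensors="pt")
print(encoding.keys())  # input_ids, bbox, attention_mask, image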
def max_product_subarray(numbers: list) -> int:
    """Return the maximum product obtainable from a contiguous subarray of
    the given list of integers."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
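# Classic worked examples for max_product_subarray(): the min/max swap above
# is exactly what handles the sign flips.
assert max_product_subarray([2, 3, -2, 4]) == 6    # subarray [2, 3]
assert max_product_subarray([-2, 0, -1]) == 0      # zero resets the product
assert max_product_subarray([-2, -3, 4]) == 24     # (-2) * (-3) * 4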
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_gpta import GPTaTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _A = logging.get_logger(__name__) _A = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} _A = { '''vocab_file''': { '''gpt2''': '''https://huggingface.co/gpt2/resolve/main/vocab.json''', '''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/vocab.json''', '''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/vocab.json''', '''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/vocab.json''', '''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/vocab.json''', }, '''merges_file''': { '''gpt2''': '''https://huggingface.co/gpt2/resolve/main/merges.txt''', '''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/merges.txt''', '''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/merges.txt''', '''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/merges.txt''', '''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''gpt2''': '''https://huggingface.co/gpt2/resolve/main/tokenizer.json''', '''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json''', '''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/tokenizer.json''', '''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json''', '''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/tokenizer.json''', }, } _A = { '''gpt2''': 1_024, '''gpt2-medium''': 1_024, '''gpt2-large''': 1_024, '''gpt2-xl''': 1_024, '''distilgpt2''': 1_024, } class A ( __UpperCAmelCase ): __snake_case = VOCAB_FILES_NAMES __snake_case = PRETRAINED_VOCAB_FILES_MAP __snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __snake_case = ['input_ids', 'attention_mask'] __snake_case = GPTaTokenizer def __init__( self, UpperCamelCase__=None, UpperCamelCase__=None, UpperCamelCase__=None, UpperCamelCase__="<|endoftext|>", UpperCamelCase__="<|endoftext|>", UpperCamelCase__="<|endoftext|>", UpperCamelCase__=False, **UpperCamelCase__, ): """simple docstring""" super().__init__( UpperCamelCase__, UpperCamelCase__, tokenizer_file=UpperCamelCase__, unk_token=UpperCamelCase__, bos_token=UpperCamelCase__, eos_token=UpperCamelCase__, add_prefix_space=UpperCamelCase__, **UpperCamelCase__, ) lowerCAmelCase_ = kwargs.pop('''add_bos_token''', UpperCamelCase__ ) lowerCAmelCase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''', UpperCamelCase__ ) != add_prefix_space: lowerCAmelCase_ = getattr(UpperCamelCase__, pre_tok_state.pop('''type''' ) ) lowerCAmelCase_ = add_prefix_space lowerCAmelCase_ = pre_tok_class(**UpperCamelCase__ ) lowerCAmelCase_ = add_prefix_space def SCREAMING_SNAKE_CASE__ ( self, *UpperCamelCase__, **UpperCamelCase__ ): """simple docstring""" lowerCAmelCase_ = kwargs.get('''is_split_into_words''', UpperCamelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." 
) return super()._batch_encode_plus(*UpperCamelCase__, **UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self, *UpperCamelCase__, **UpperCamelCase__ ): """simple docstring""" lowerCAmelCase_ = kwargs.get('''is_split_into_words''', UpperCamelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._encode_plus(*UpperCamelCase__, **UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None ): """simple docstring""" lowerCAmelCase_ = self._tokenizer.model.save(UpperCamelCase__, name=UpperCamelCase__ ) return tuple(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ): """simple docstring""" lowerCAmelCase_ = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(UpperCamelCase__, add_special_tokens=UpperCamelCase__ ) + [self.eos_token_id] ) if len(UpperCamelCase__ ) > self.model_max_length: lowerCAmelCase_ = input_ids[-self.model_max_length :] return input_ids
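# A minimal usage sketch for the fast GPT-2 tokenizer above; "gpt2" is one of
# the public checkpoints listed in the vocab map:
#
#   from transformers import GPT2TokenizerFast
#   tok = GPT2TokenizerFast.from_pretrained("gpt2")
#   print(tok("Hello world")["input_ids"])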
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def __UpperCamelCase ( _A ): lowerCAmelCase_ = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2] lowerCAmelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False lowerCAmelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False lowerCAmelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: lowerCAmelCase_ = [3, 3, 3, 3] lowerCAmelCase_ = [5, 5, 5, 5] elif "fl4" in model_name: lowerCAmelCase_ = [4, 4, 4, 4] lowerCAmelCase_ = [3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: lowerCAmelCase_ = [3, 3, 3, 3] if "lrf" in model_name: lowerCAmelCase_ = [3, 3, 3, 3] else: lowerCAmelCase_ = [2, 2, 2, 2] if "tiny" in model_name: lowerCAmelCase_ = 96 elif "small" in model_name: lowerCAmelCase_ = 96 elif "base" in model_name: lowerCAmelCase_ = 128 elif "large" in model_name: lowerCAmelCase_ = 192 elif "xlarge" in model_name: lowerCAmelCase_ = 256 elif "huge" in model_name: lowerCAmelCase_ = 352 # set label information lowerCAmelCase_ = '''huggingface/label-files''' if "large" in model_name or "huge" in model_name: lowerCAmelCase_ = '''imagenet-22k-id2label.json''' else: lowerCAmelCase_ = '''imagenet-1k-id2label.json''' lowerCAmelCase_ = json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) ) lowerCAmelCase_ = {int(_A ): v for k, v in idalabel.items()} lowerCAmelCase_ = {v: k for k, v in idalabel.items()} lowerCAmelCase_ = FocalNetConfig( embed_dim=_A , depths=_A , focal_levels=_A , focal_windows=_A , use_conv_embed=_A , idalabel=_A , labelaid=_A , use_post_layernorm=_A , use_layerscale=_A , ) return config def __UpperCamelCase ( _A ): if "patch_embed.proj" in name: lowerCAmelCase_ = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: lowerCAmelCase_ = name.replace('''patch_embed.norm''' , '''embeddings.norm''' ) if "layers" in name: lowerCAmelCase_ = '''encoder.''' + name if "encoder.layers" in name: lowerCAmelCase_ = name.replace('''encoder.layers''' , '''encoder.stages''' ) if "downsample.proj" in name: lowerCAmelCase_ = name.replace('''downsample.proj''' , '''downsample.projection''' ) if "blocks" in name: lowerCAmelCase_ = name.replace('''blocks''' , '''layers''' ) if "modulation.f.weight" in name or "modulation.f.bias" in name: lowerCAmelCase_ = name.replace('''modulation.f''' , '''modulation.projection_in''' ) if "modulation.h.weight" in name or "modulation.h.bias" in name: lowerCAmelCase_ = name.replace('''modulation.h''' , '''modulation.projection_context''' ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: lowerCAmelCase_ = name.replace('''modulation.proj''' , '''modulation.projection_out''' ) if name == "norm.weight": lowerCAmelCase_ = '''layernorm.weight''' if name == "norm.bias": lowerCAmelCase_ = '''layernorm.bias''' if "head" in name: lowerCAmelCase_ = name.replace('''head''' , '''classifier''' ) else: lowerCAmelCase_ = '''focalnet.''' + name return name def __UpperCamelCase ( _A , _A , _A=False ): # fmt: off 
lowerCAmelCase_ = { '''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''', '''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''', '''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''', '''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''', '''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''', '''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''', '''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''', '''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''', '''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''', '''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''', } # fmt: on lowerCAmelCase_ = model_name_to_url[model_name] print('''Checkpoint URL: ''' , _A ) lowerCAmelCase_ = torch.hub.load_state_dict_from_url(_A , map_location='''cpu''' )['''model'''] # rename keys for key in state_dict.copy().keys(): lowerCAmelCase_ = state_dict.pop(_A ) lowerCAmelCase_ = val lowerCAmelCase_ = get_focalnet_config(_A ) lowerCAmelCase_ = FocalNetForImageClassification(_A ) model.eval() # load state dict model.load_state_dict(_A ) # verify conversion lowerCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCAmelCase_ = BitImageProcessor( do_resize=_A , size={'''shortest_edge''': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=_A , crop_size=224 , do_normalize=_A , image_mean=_A , image_std=_A , ) lowerCAmelCase_ = Image.open(requests.get(_A , stream=_A ).raw ) lowerCAmelCase_ = processor(images=_A , return_tensors='''pt''' ) lowerCAmelCase_ = transforms.Compose( [ transforms.Resize(256 ), transforms.CenterCrop(224 ), transforms.ToTensor(), transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ), ] ) lowerCAmelCase_ = image_transforms(_A ).unsqueeze(0 ) # verify pixel_values assert torch.allclose(inputs.pixel_values , _A , atol=1E-4 ) lowerCAmelCase_ = model(**_A ) lowerCAmelCase_ = outputs.logits.argmax(-1 ).item() print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] ) print('''First values of logits:''' , outputs.logits[0, :3] ) if model_name == "focalnet-tiny": lowerCAmelCase_ = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ) elif model_name == "focalnet-tiny-lrf": lowerCAmelCase_ = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] ) elif model_name == "focalnet-small": lowerCAmelCase_ = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] ) elif model_name == "focalnet-small-lrf": lowerCAmelCase_ = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] ) elif model_name == "focalnet-base": lowerCAmelCase_ = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] ) elif model_name == "focalnet-base-lrf": lowerCAmelCase_ = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] ) assert torch.allclose(outputs.logits[0, :3] , _A , atol=1E-4 ) print('''Looks ok!''' ) if 
pytorch_dump_folder_path is not None: print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_A ) processor.save_pretrained(_A ) if push_to_hub: print(f"Pushing model and processor of {model_name} to the hub..." ) model.push_to_hub(f"{model_name}" ) processor.push_to_hub(f"{model_name}" ) if __name__ == "__main__": _A = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''focalnet-tiny''', type=str, help='''Name of the FocalNet model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub.''', ) _A = parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
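# A hypothetical invocation of the FocalNet conversion script above (the flag
# names come from its argparse definitions; the script filename is assumed):
#
#   python convert_focalnet_to_hf_format.py \
#       --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny \
#       --push_to_hub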
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
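# A minimal usage sketch; the class is re-exported at the library top level,
# so a default MVP configuration can be built as (values shown are the
# defaults defined above):
#
#   from transformers import MvpConfig
#   config = MvpConfig()
#   print(config.d_model, config.encoder_layers, config.use_prompt)  # 1024 12 False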
import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    # Builds an output projection whose weights are the token-embedding weights
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
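# A hypothetical invocation of the XGLM conversion script above (positional
# arguments come from its argparse definitions; the script filename and paths
# are placeholders):
#
#   python convert_xglm_original_ckpt_to_trfms.py /path/to/model.pt /path/to/output_dir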
from __future__ import annotations


def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    """
    Given two of the three carrier concentrations of a semiconductor (with the
    unknown one passed as 0), computes the missing value from the mass-action
    law n * p = n_i**2 and returns a (name, value) tuple.
    """
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError("Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        return (-1, -1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
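# A short, illustrative check of the mass-action law n * p = n_i**2; the
# concentrations below are made up for demonstration:
if __name__ == "__main__":
    print(carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0))
    # -> ('intrinsic_conc', 50.0)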
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-tiny-model.py`.
#
# It will then be used as "stas/tiny-wmt19-en-ru"

from pathlib import Path
import json
import tempfile

from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES


mname_tiny = "tiny-wmt19-en-ru"

# Build

# borrowed from a test
vocab = [
    "l",
    "o",
    "w",
    "e",
    "r",
    "s",
    "t",
    "i",
    "d",
    "n",
    "w</w>",
    "r</w>",
    "t</w>",
    "lo",
    "low",
    "er</w>",
    "low</w>",
    "lowest</w>",
    "newer</w>",
    "wider</w>",
    "<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
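# The generated checkpoint can be reloaded like any other; the Hub id below is
# the one referenced in the header comment of this script:
#
#   from transformers import FSMTForConditionalGeneration
#   tiny = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-ru")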
import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse('''3.8'''): import importlib_metadata else: import importlib.metadata as importlib_metadata def __UpperCamelCase ( _A , _A=False ): try: lowerCAmelCase_ = os.environ[key] except KeyError: # KEY isn't set, default to `default`. lowerCAmelCase_ = default else: # KEY is set, convert it to True or False. try: lowerCAmelCase_ = strtobool(_A ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f"If set, {key} must be yes or no." ) return _value _A = parse_flag_from_env('''RUN_SLOW''', default=False) _A = parse_flag_from_env('''RUN_REMOTE''', default=False) _A = parse_flag_from_env('''RUN_LOCAL''', default=True) _A = parse_flag_from_env('''RUN_PACKAGED''', default=True) # Compression _A = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''') _A = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''') _A = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''') # Audio _A = pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''), reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''', ) # Beam _A = pytest.mark.skipif( not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''), reason='''test requires apache-beam and a compatible dill version''', ) # Dill-cloudpickle compatibility _A = pytest.mark.skipif( config.DILL_VERSION <= version.parse('''0.3.2'''), reason='''test requires dill>0.3.2 for cloudpickle compatibility''', ) # Windows _A = pytest.mark.skipif( sys.platform == '''win32''', reason='''test should not be run on Windows''', ) def __UpperCamelCase ( _A ): try: import faiss # noqa except ImportError: lowerCAmelCase_ = unittest.skip('''test requires faiss''' )(_A ) return test_case def __UpperCamelCase ( _A ): try: import regex # noqa except ImportError: lowerCAmelCase_ = unittest.skip('''test requires regex''' )(_A ) return test_case def __UpperCamelCase ( _A ): try: import elasticsearch # noqa except ImportError: lowerCAmelCase_ = unittest.skip('''test requires elasticsearch''' )(_A ) return test_case def __UpperCamelCase ( _A ): try: import sqlalchemy # noqa except ImportError: lowerCAmelCase_ = unittest.skip('''test requires sqlalchemy''' )(_A ) return test_case def __UpperCamelCase ( _A ): if not config.TORCH_AVAILABLE: lowerCAmelCase_ = unittest.skip('''test requires PyTorch''' )(_A ) return test_case def __UpperCamelCase ( _A ): if not config.TF_AVAILABLE: lowerCAmelCase_ = unittest.skip('''test requires TensorFlow''' )(_A ) return test_case def __UpperCamelCase ( _A ): if not config.JAX_AVAILABLE: lowerCAmelCase_ = unittest.skip('''test requires JAX''' )(_A ) return test_case def __UpperCamelCase ( _A ): if not config.PIL_AVAILABLE: lowerCAmelCase_ = unittest.skip('''test requires Pillow''' )(_A ) return test_case def __UpperCamelCase ( _A ): try: import transformers # noqa F401 except ImportError: return unittest.skip('''test requires 
transformers''' )(_A ) else: return test_case def __UpperCamelCase ( _A ): try: import tiktoken # noqa F401 except ImportError: return unittest.skip('''test requires tiktoken''' )(_A ) else: return test_case def __UpperCamelCase ( _A ): try: import spacy # noqa F401 except ImportError: return unittest.skip('''test requires spacy''' )(_A ) else: return test_case def __UpperCamelCase ( _A ): def _require_spacy_model(_A ): try: import spacy # noqa F401 spacy.load(_A ) except ImportError: return unittest.skip('''test requires spacy''' )(_A ) except OSError: return unittest.skip('''test requires spacy model \'{}\''''.format(_A ) )(_A ) else: return test_case return _require_spacy_model def __UpperCamelCase ( _A ): try: import pyspark # noqa F401 except ImportError: return unittest.skip('''test requires pyspark''' )(_A ) else: return test_case def __UpperCamelCase ( _A ): try: import joblibspark # noqa F401 except ImportError: return unittest.skip('''test requires joblibspark''' )(_A ) else: return test_case def __UpperCamelCase ( _A ): if not _run_slow_tests or _run_slow_tests == 0: lowerCAmelCase_ = unittest.skip('''test is slow''' )(_A ) return test_case def __UpperCamelCase ( _A ): if not _run_local_tests or _run_local_tests == 0: lowerCAmelCase_ = unittest.skip('''test is local''' )(_A ) return test_case def __UpperCamelCase ( _A ): if not _run_packaged_tests or _run_packaged_tests == 0: lowerCAmelCase_ = unittest.skip('''test is packaged''' )(_A ) return test_case def __UpperCamelCase ( _A ): if not _run_remote_tests or _run_remote_tests == 0: lowerCAmelCase_ = unittest.skip('''test requires remote''' )(_A ) return test_case def __UpperCamelCase ( *_A ): def decorate(cls ): for name, fn in cls.__dict__.items(): if callable(_A ) and name.startswith('''test''' ): for decorator in decorators: lowerCAmelCase_ = decorator(_A ) setattr(cls , _A , _A ) return cls return decorate class A ( __UpperCAmelCase ): pass class A ( __UpperCAmelCase ): __snake_case = 0 __snake_case = 1 __snake_case = 2 @contextmanager def __UpperCamelCase ( _A=OfflineSimulationMode.CONNECTION_FAILS , _A=1E-1_6 ): lowerCAmelCase_ = requests.Session().request def timeout_request(_A , _A , _A , **_A ): # Change the url to an invalid url so that the connection hangs lowerCAmelCase_ = '''https://10.255.255.1''' if kwargs.get('''timeout''' ) is None: raise RequestWouldHangIndefinitelyError( f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout." 
) lowerCAmelCase_ = timeout try: return online_request(_A , _A , **_A ) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier lowerCAmelCase_ = url lowerCAmelCase_ = e.args[0] lowerCAmelCase_ = (max_retry_error.args[0].replace('''10.255.255.1''' , f"OfflineMock[{url}]" ),) lowerCAmelCase_ = (max_retry_error,) raise def raise_connection_error(_A , _A , **_A ): raise requests.ConnectionError('''Offline mode is enabled.''' , request=_A ) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch('''requests.Session.send''' , _A ): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch('''requests.Session.request''' , _A ): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch('''datasets.config.HF_DATASETS_OFFLINE''' , _A ): yield else: raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' ) @contextmanager def __UpperCamelCase ( *_A , **_A ): lowerCAmelCase_ = str(Path().resolve() ) with tempfile.TemporaryDirectory(*_A , **_A ) as tmp_dir: try: os.chdir(_A ) yield finally: os.chdir(_A ) @contextmanager def __UpperCamelCase ( ): import gc gc.collect() lowerCAmelCase_ = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def __UpperCamelCase ( ): import gc gc.collect() lowerCAmelCase_ = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." def __UpperCamelCase ( _A , _A ): return deepcopy(_A ).integers(0 , 100 , 10 ).tolist() == deepcopy(_A ).integers(0 , 100 , 10 ).tolist() def __UpperCamelCase ( _A ): import decorator from requests.exceptions import HTTPError def _wrapper(_A , *_A , **_A ): try: return func(*_A , **_A ) except HTTPError as err: if str(_A ).startswith('''500''' ) or str(_A ).startswith('''502''' ): pytest.xfail(str(_A ) ) raise err return decorator.decorator(_wrapper , _A ) class A : def __init__( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ): """simple docstring""" lowerCAmelCase_ = returncode lowerCAmelCase_ = stdout lowerCAmelCase_ = stderr async def __UpperCamelCase ( _A , _A ): while True: lowerCAmelCase_ = await stream.readline() if line: callback(_A ) else: break async def __UpperCamelCase ( _A , _A=None , _A=None , _A=None , _A=False , _A=False ): if echo: print('''\nRunning: ''' , ''' '''.join(_A ) ) lowerCAmelCase_ = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=_A , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_A , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) lowerCAmelCase_ = [] lowerCAmelCase_ = [] def tee(_A , _A , _A , _A="" ): lowerCAmelCase_ = line.decode('''utf-8''' ).rstrip() sink.append(_A ) if not quiet: print(_A , _A , file=_A ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout , lambda _A : tee(_A , _A , sys.stdout , label='''stdout:''' ) ), _read_stream(p.stderr , lambda _A : tee(_A , _A , sys.stderr , label='''stderr:''' ) ), ] , timeout=_A , ) return _RunOutput(await p.wait() , _A , _A ) def __UpperCamelCase ( _A , _A=None , _A=None , _A=180 , _A=False , _A=True ): lowerCAmelCase_ = asyncio.get_event_loop() lowerCAmelCase_ = loop.run_until_complete( _stream_subprocess(_A , env=_A , stdin=_A , timeout=_A , quiet=_A , echo=_A ) ) lowerCAmelCase_ = ''' '''.join(_A ) if result.returncode > 0: lowerCAmelCase_ = '''\n'''.join(result.stderr ) raise RuntimeError( f"'{cmd_str}' failed with returncode {result.returncode}\n\n" f"The combined stderr from workers follows:\n{stderr}" ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(f"'{cmd_str}' produced no output." ) return result def __UpperCamelCase ( ): lowerCAmelCase_ = os.environ.get('''PYTEST_XDIST_WORKER''' , '''gw0''' ) lowerCAmelCase_ = re.sub(r'''^gw''' , '''''' , _A , 0 , re.M ) return int(_A ) def __UpperCamelCase ( ): lowerCAmelCase_ = 29500 lowerCAmelCase_ = pytest_xdist_worker_id() return port + uniq_delta
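# A hedged usage sketch for the skip-decorators defined in this testing-utils
# module. Upstream they carry names such as `require_faiss` and `slow` (the
# renamed definitions above obscure them), so the names below are assumptions:
#
#   @require_faiss
#   @slow
#   def test_build_faiss_index():
#       ...  # body illustrative; runs only with faiss installed and RUN_SLOW=yes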
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """
    Cleans one modality section of the model doc table of content: duplicated
    entries are collapsed (rejecting ambiguous titles) and the result is
    sorted alphabetically by title.
    """
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add non-duplicate keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
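# A sketch of what clean_model_doc_toc does to a duplicated entry (toy data,
# not from the real toctree):
#
#   clean_model_doc_toc([
#       {"local": "model_doc/bert", "title": "BERT"},
#       {"local": "model_doc/bert", "title": "BERT"},
#       {"local": "model_doc/albert", "title": "ALBERT"},
#   ])
#   # -> [{'local': 'model_doc/albert', 'title': 'ALBERT'},
#   #     {'local': 'model_doc/bert', 'title': 'BERT'}]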
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
        from .image_processing_mobilevit import MobileViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilevit import (
            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileViTForImageClassification,
            MobileViTForSemanticSegmentation,
            MobileViTModel,
            MobileViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilevit import (
            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileViTForImageClassification,
            TFMobileViTForSemanticSegmentation,
            TFMobileViTModel,
            TFMobileViTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
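# With the lazy module installed into sys.modules above, heavy backends are
# imported only on first attribute access. A sketch (assumes torch is
# installed):
#
#   from transformers import MobileViTConfig, MobileViTModel
#   model = MobileViTModel(MobileViTConfig())  # triggers the deferred torch import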
import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class A ( __UpperCAmelCase ): __snake_case = (UnCLIPScheduler,) def SCREAMING_SNAKE_CASE__ ( self, **UpperCamelCase__ ): """simple docstring""" lowerCAmelCase_ = { '''num_train_timesteps''': 1000, '''variance_type''': '''fixed_small_log''', '''clip_sample''': True, '''clip_sample_range''': 1.0, '''prediction_type''': '''epsilon''', } config.update(**UpperCamelCase__ ) return config def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" for time_step in [0, 500, 999]: for prev_timestep in [None, 5, 100, 250, 500, 750]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=UpperCamelCase__, prev_timestep=UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.scheduler_classes[0] lowerCAmelCase_ = self.get_scheduler_config(variance_type='''fixed_small_log''' ) lowerCAmelCase_ = scheduler_class(**UpperCamelCase__ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1E-5 def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.scheduler_classes[0] lowerCAmelCase_ = self.get_scheduler_config(variance_type='''learned_range''' ) lowerCAmelCase_ = scheduler_class(**UpperCamelCase__ ) lowerCAmelCase_ = 0.5 assert scheduler._get_variance(1, predicted_variance=UpperCamelCase__ ) - -10.1_712_790 < 1E-5 assert scheduler._get_variance(487, predicted_variance=UpperCamelCase__ ) - -5.7_998_052 < 1E-5 assert scheduler._get_variance(999, predicted_variance=UpperCamelCase__ ) - -0.0_010_011 < 1E-5 def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.scheduler_classes[0] lowerCAmelCase_ = self.get_scheduler_config() lowerCAmelCase_ = scheduler_class(**UpperCamelCase__ ) lowerCAmelCase_ = scheduler.timesteps lowerCAmelCase_ = self.dummy_model() lowerCAmelCase_ = self.dummy_sample_deter lowerCAmelCase_ = torch.manual_seed(0 ) for i, t in enumerate(UpperCamelCase__ ): # 1. predict noise residual lowerCAmelCase_ = model(UpperCamelCase__, UpperCamelCase__ ) # 2. 
predict previous mean of sample x_t-1 lowerCAmelCase_ = scheduler.step(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, generator=UpperCamelCase__ ).prev_sample lowerCAmelCase_ = pred_prev_sample lowerCAmelCase_ = torch.sum(torch.abs(UpperCamelCase__ ) ) lowerCAmelCase_ = torch.mean(torch.abs(UpperCamelCase__ ) ) assert abs(result_sum.item() - 252.2_682_495 ) < 1E-2 assert abs(result_mean.item() - 0.3_284_743 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.scheduler_classes[0] lowerCAmelCase_ = self.get_scheduler_config() lowerCAmelCase_ = scheduler_class(**UpperCamelCase__ ) scheduler.set_timesteps(25 ) lowerCAmelCase_ = scheduler.timesteps lowerCAmelCase_ = self.dummy_model() lowerCAmelCase_ = self.dummy_sample_deter lowerCAmelCase_ = torch.manual_seed(0 ) for i, t in enumerate(UpperCamelCase__ ): # 1. predict noise residual lowerCAmelCase_ = model(UpperCamelCase__, UpperCamelCase__ ) if i + 1 == timesteps.shape[0]: lowerCAmelCase_ = None else: lowerCAmelCase_ = timesteps[i + 1] # 2. predict previous mean of sample x_t-1 lowerCAmelCase_ = scheduler.step( UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, prev_timestep=UpperCamelCase__, generator=UpperCamelCase__ ).prev_sample lowerCAmelCase_ = pred_prev_sample lowerCAmelCase_ = torch.sum(torch.abs(UpperCamelCase__ ) ) lowerCAmelCase_ = torch.mean(torch.abs(UpperCamelCase__ ) ) assert abs(result_sum.item() - 258.2_044_983 ) < 1E-2 assert abs(result_mean.item() - 0.3_362_038 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" pass
import argparse

import torch

from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--openai_checkpoint_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the TensorFlow checkpoint path.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--openai_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_openai_checkpoint_to_pytorch(
        args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
    )
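# A hypothetical invocation of this conversion script (flag names come from
# the argparse definitions above; the script filename and paths are
# placeholders):
#
#   python convert_openai_original_tf_checkpoint_to_pytorch.py \
#       --openai_checkpoint_folder_path /path/to/openai/checkpoint \
#       --pytorch_dump_folder_path /path/to/output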
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _A = logging.get_logger(__name__) _A = torch.device('''cpu''') def __UpperCamelCase ( ): lowerCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCAmelCase_ = Image.open(requests.get(_A , stream=_A ).raw ) return im def __UpperCamelCase ( _A ): if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1_7_0_3E0_0, 2.1_1_0_7E0_0, -2.0_8_1_1E0_0, 8.8_6_8_5E-0_1, 2.4_3_6_0E-0_1] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9_6_3_6E-0_1, 2.3_4_7_8E-0_1, -1.6_9_6_3E0_0, -1.7_3_8_1E0_0, -8.6_3_3_7E-0_1] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2_7_6_8E-0_1, -4.7_4_2_9E-0_1, -1.0_8_9_7E0_0, -1.0_2_4_8E0_0, 3.5_5_2_3E-0_2] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5_3_3_0E-0_1, 2.4_2_1_1E-0_1, -6.0_1_8_5E-0_1, -8.2_7_8_9E-0_1, -6.0_4_4_6E-0_2] ) def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = dct.pop(_A ) lowerCAmelCase_ = val def __UpperCamelCase ( _A ): lowerCAmelCase_ = [] for k in state_dict.keys(): lowerCAmelCase_ = k if ".pwconv" in k: lowerCAmelCase_ = k_new.replace('''.pwconv''' , '''.point_wise_conv''' ) if ".dwconv" in k: lowerCAmelCase_ = k_new.replace('''.dwconv''' , '''.depth_wise_conv''' ) if ".Proj." in k: lowerCAmelCase_ = k_new.replace('''.Proj.''' , '''.proj.''' ) if "patch_embed" in k_new: lowerCAmelCase_ = k_new.replace('''patch_embed''' , '''swiftformer.patch_embed.patch_embedding''' ) if "network" in k_new: lowerCAmelCase_ = k_new.split('''.''' ) if ls[2].isdigit(): lowerCAmelCase_ = '''swiftformer.encoder.network.''' + ls[1] + '''.blocks.''' + ls[2] + '''.''' + '''.'''.join(ls[3:] ) else: lowerCAmelCase_ = k_new.replace('''network''' , '''swiftformer.encoder.network''' ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size lowerCAmelCase_ = 1000 lowerCAmelCase_ = '''huggingface/label-files''' lowerCAmelCase_ = '''imagenet-1k-id2label.json''' lowerCAmelCase_ = json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) ) lowerCAmelCase_ = {int(_A ): v for k, v in idalabel.items()} lowerCAmelCase_ = idalabel lowerCAmelCase_ = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": lowerCAmelCase_ = [3, 3, 6, 4] lowerCAmelCase_ = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": lowerCAmelCase_ = [3, 3, 9, 6] lowerCAmelCase_ = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": lowerCAmelCase_ = [4, 3, 10, 5] lowerCAmelCase_ = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": lowerCAmelCase_ = [4, 4, 12, 6] lowerCAmelCase_ = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith('''https''' ): lowerCAmelCase_ = torch.hub.load_state_dict_from_url(_A , map_location='''cpu''' , check_hash=_A ) else: lowerCAmelCase_ = torch.load(_A , map_location='''cpu''' ) lowerCAmelCase_ = checkpoint lowerCAmelCase_ = create_rename_keys(_A ) for rename_key_src, rename_key_dest in rename_keys: rename_key(_A , _A 
, _A ) # load HuggingFace model lowerCAmelCase_ = SwiftFormerForImageClassification(_A ).eval() hf_model.load_state_dict(_A ) # prepare test inputs lowerCAmelCase_ = prepare_img() lowerCAmelCase_ = ViTImageProcessor.from_pretrained('''preprocessor_config''' ) lowerCAmelCase_ = processor(images=_A , return_tensors='''pt''' ) # compare outputs from both models lowerCAmelCase_ = get_expected_output(_A ) lowerCAmelCase_ = hf_model(inputs['''pixel_values'''] ).logits assert hf_logits.shape == torch.Size([1, 1000] ) assert torch.allclose(hf_logits[0, 0:5] , _A , atol=1E-3 ) Path(_A ).mkdir(exist_ok=_A ) print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}" ) hf_model.save_pretrained(_A ) if __name__ == "__main__": _A = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--swiftformer_name''', default='''swiftformer_xs''', choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''], type=str, help='''Name of the SwiftFormer model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''./converted_outputs/''', type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''') _A = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
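# A hypothetical invocation of the SwiftFormer conversion script above (flag
# names and model-name choices come from its argparse definitions; the script
# filename and checkpoint path are assumptions):
#
#   python convert_swiftformer_original_to_hf.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt /path/to/original_checkpoint.pth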
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImgaImgPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): __snake_case = StableDiffusionControlNetImgaImgPipeline __snake_case = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'} __snake_case = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS __snake_case = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'control_image'} ) __snake_case = IMAGE_TO_IMAGE_IMAGE_PARAMS def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCAmelCase_ = UNetaDConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=32, ) torch.manual_seed(0 ) lowerCAmelCase_ = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), ) torch.manual_seed(0 ) lowerCAmelCase_ = DDIMScheduler( beta_start=0.00_085, beta_end=0.012, beta_schedule='''scaled_linear''', clip_sample=UpperCamelCase__, set_alpha_to_one=UpperCamelCase__, ) torch.manual_seed(0 ) lowerCAmelCase_ = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, ) torch.manual_seed(0 ) lowerCAmelCase_ = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) lowerCAmelCase_ = CLIPTextModel(UpperCamelCase__ ) lowerCAmelCase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) lowerCAmelCase_ = { '''unet''': unet, '''controlnet''': controlnet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__=0 ): """simple docstring""" if str(UpperCamelCase__ ).startswith('''mps''' ): lowerCAmelCase_ = torch.manual_seed(UpperCamelCase__ ) else: lowerCAmelCase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) lowerCAmelCase_ = 2 lowerCAmelCase_ = randn_tensor( (1, 3, 32 * 
controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=UpperCamelCase__, device=torch.device(UpperCamelCase__ ), ) lowerCAmelCase_ = floats_tensor(control_image.shape, rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) lowerCAmelCase_ = image.cpu().permute(0, 2, 3, 1 )[0] lowerCAmelCase_ = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert('''RGB''' ).resize((64, 64) ) lowerCAmelCase_ = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', '''image''': image, '''control_image''': control_image, } return inputs def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) class A ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): __snake_case = StableDiffusionControlNetImgaImgPipeline __snake_case = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'} __snake_case = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS __snake_case = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCAmelCase_ = UNetaDConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=32, ) torch.manual_seed(0 ) def init_weights(UpperCamelCase__ ): if isinstance(UpperCamelCase__, torch.nn.Convad ): torch.nn.init.normal(m.weight ) m.bias.data.fill_(1.0 ) lowerCAmelCase_ = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), ) controlneta.controlnet_down_blocks.apply(UpperCamelCase__ ) torch.manual_seed(0 ) lowerCAmelCase_ = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), ) controlneta.controlnet_down_blocks.apply(UpperCamelCase__ ) torch.manual_seed(0 ) lowerCAmelCase_ = DDIMScheduler( beta_start=0.00_085, beta_end=0.012, beta_schedule='''scaled_linear''', clip_sample=UpperCamelCase__, set_alpha_to_one=UpperCamelCase__, ) torch.manual_seed(0 ) lowerCAmelCase_ = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, ) torch.manual_seed(0 ) lowerCAmelCase_ = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) lowerCAmelCase_ = CLIPTextModel(UpperCamelCase__ ) lowerCAmelCase_ = 
CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) lowerCAmelCase_ = MultiControlNetModel([controlneta, controlneta] ) lowerCAmelCase_ = { '''unet''': unet, '''controlnet''': controlnet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__=0 ): """simple docstring""" if str(UpperCamelCase__ ).startswith('''mps''' ): lowerCAmelCase_ = torch.manual_seed(UpperCamelCase__ ) else: lowerCAmelCase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) lowerCAmelCase_ = 2 lowerCAmelCase_ = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=UpperCamelCase__, device=torch.device(UpperCamelCase__ ), ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=UpperCamelCase__, device=torch.device(UpperCamelCase__ ), ), ] lowerCAmelCase_ = floats_tensor(control_image[0].shape, rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) lowerCAmelCase_ = image.cpu().permute(0, 2, 3, 1 )[0] lowerCAmelCase_ = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert('''RGB''' ).resize((64, 64) ) lowerCAmelCase_ = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', '''image''': image, '''control_image''': control_image, } return inputs def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.get_dummy_components() lowerCAmelCase_ = self.pipeline_class(**UpperCamelCase__ ) pipe.to(UpperCamelCase__ ) lowerCAmelCase_ = 10.0 lowerCAmelCase_ = 4 lowerCAmelCase_ = self.get_dummy_inputs(UpperCamelCase__ ) lowerCAmelCase_ = steps lowerCAmelCase_ = scale lowerCAmelCase_ = pipe(**UpperCamelCase__ )[0] lowerCAmelCase_ = self.get_dummy_inputs(UpperCamelCase__ ) lowerCAmelCase_ = steps lowerCAmelCase_ = scale lowerCAmelCase_ = pipe(**UpperCamelCase__, control_guidance_start=0.1, control_guidance_end=0.2 )[0] lowerCAmelCase_ = self.get_dummy_inputs(UpperCamelCase__ ) lowerCAmelCase_ = steps lowerCAmelCase_ = scale lowerCAmelCase_ = pipe(**UpperCamelCase__, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7] )[0] lowerCAmelCase_ = self.get_dummy_inputs(UpperCamelCase__ ) lowerCAmelCase_ = steps lowerCAmelCase_ = scale lowerCAmelCase_ = pipe(**UpperCamelCase__, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8] )[0] # make sure that all outputs are different assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.get_dummy_components() lowerCAmelCase_ = 
self.pipeline_class(**UpperCamelCase__ ) pipe.to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(UpperCamelCase__ ) except NotImplementedError: pass @slow @require_torch_gpu class A ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' ) lowerCAmelCase_ = StableDiffusionControlNetImgaImgPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''', safety_checker=UpperCamelCase__, controlnet=UpperCamelCase__ ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowerCAmelCase_ = torch.Generator(device='''cpu''' ).manual_seed(0 ) lowerCAmelCase_ = '''evil space-punk bird''' lowerCAmelCase_ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((512, 512) ) lowerCAmelCase_ = load_image( '''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((512, 512) ) lowerCAmelCase_ = pipe( UpperCamelCase__, UpperCamelCase__, control_image=UpperCamelCase__, generator=UpperCamelCase__, output_type='''np''', num_inference_steps=50, strength=0.6, ) lowerCAmelCase_ = output.images[0] assert image.shape == (512, 512, 3) lowerCAmelCase_ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' ) assert np.abs(expected_image - image ).max() < 9E-2
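# A minimal end-to-end sketch of the ControlNet image-to-image flow the slow test
# above exercises, using public diffusers classes (class names in this dump are
# mangled; StableDiffusionControlNetImgaImgPipeline corresponds to
# StableDiffusionControlNetImg2ImgPipeline). Prompt, strength, and step count mirror
# the test; treat them as illustrative, not normative.
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetImg2ImgPipeline
from diffusers.utils import load_image

controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, safety_checker=None
)
pipe.enable_model_cpu_offload()

image = load_image(
    "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
).resize((512, 512))
control_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
).resize((512, 512))

generator = torch.Generator(device="cpu").manual_seed(0)
result = pipe(
    "evil space-punk bird",
    image=image,
    control_image=control_image,
    generator=generator,
    num_inference_steps=50,
    strength=0.6,
).images[0]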
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _A = logging.get_logger(__name__) _A = { '''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''', # See all ViT models at https://huggingface.co/models?filter=vit } class A ( __UpperCAmelCase ): __snake_case = 'vit' def __init__( self, UpperCamelCase__=768, UpperCamelCase__=12, UpperCamelCase__=12, UpperCamelCase__=3072, UpperCamelCase__="gelu", UpperCamelCase__=0.0, UpperCamelCase__=0.0, UpperCamelCase__=0.02, UpperCamelCase__=1E-12, UpperCamelCase__=224, UpperCamelCase__=16, UpperCamelCase__=3, UpperCamelCase__=True, UpperCamelCase__=16, **UpperCamelCase__, ): """simple docstring""" super().__init__(**UpperCamelCase__ ) lowerCAmelCase_ = hidden_size lowerCAmelCase_ = num_hidden_layers lowerCAmelCase_ = num_attention_heads lowerCAmelCase_ = intermediate_size lowerCAmelCase_ = hidden_act lowerCAmelCase_ = hidden_dropout_prob lowerCAmelCase_ = attention_probs_dropout_prob lowerCAmelCase_ = initializer_range lowerCAmelCase_ = layer_norm_eps lowerCAmelCase_ = image_size lowerCAmelCase_ = patch_size lowerCAmelCase_ = num_channels lowerCAmelCase_ = qkv_bias lowerCAmelCase_ = encoder_stride class A ( __UpperCAmelCase ): __snake_case = version.parse('1.11' ) @property def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" return 1E-4
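# A short usage sketch for the ViT configuration defined above: build a randomly
# initialized model from a custom config. ViTModel is an assumption here (this file
# only defines the config and its ONNX export spec).
from transformers import ViTConfig, ViTModel

config = ViTConfig(image_size=224, patch_size=16, hidden_size=768, num_hidden_layers=12)
model = ViTModel(config)
print(model.config.hidden_size)  # 768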
from __future__ import annotations

from random import random


class Node:
    """Treap node: ordered as a BST by value and as a heap by random priority."""

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        return pformat(
            {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1
        )

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap into a tree with values <= value and a tree with values > value."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    if root.value is None:
        return None, None
    if value < root.value:
        left, root.left = split(root.left, value)
        return left, root
    root.right, right = split(root.right, value)
    return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps where every value in ``left`` is <= every value in ``right``."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    if left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    right.left = merge(left, right.left)
    return right


def insert(root: Node | None, value: int) -> Node | None:
    """Insert ``value`` by splitting around it and merging the new node in between."""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    """Remove every node holding ``value`` by splitting it out and discarding it."""
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    """Print the treap's values in sorted order."""
    if not root:  # None
        return
    inorder(root.left)
    print(root.value, end=",")
    inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    """Apply whitespace-separated commands: +N inserts N, -N erases all nodes with N."""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good bye!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
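# Example session with the treap helpers above: build a tree, erase a value, and
# print the remaining keys in sorted order.
root = None
for number in [5, 3, 8, 1, 9]:
    root = insert(root, number)
inorder(root)  # 1,3,5,8,9,
print()
root = erase(root, 8)
inorder(root)  # 1,3,5,9,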
import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def __UpperCamelCase ( _A , _A ): assert isinstance(_A , _A ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = tmp_path / '''cache''' lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCAmelCase_ = JsonDatasetReader(_A , cache_dir=_A , keep_in_memory=_A ).read() _check_json_dataset(_A , _A ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = tmp_path / '''cache''' lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase_ = features.copy() if features else default_expected_features lowerCAmelCase_ = ( Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase_ = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read() _check_json_dataset(_A , _A ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}, ] , ) def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = tmp_path / '''cache''' lowerCAmelCase_ = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''} lowerCAmelCase_ = features.copy() if features else default_expected_features lowerCAmelCase_ = ( Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase_ = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read() assert isinstance(_A , _A ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def __UpperCamelCase ( _A , _A ): # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"} lowerCAmelCase_ = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''} lowerCAmelCase_ = features.copy() lowerCAmelCase_ = ( Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase_ = tmp_path / '''cache''' lowerCAmelCase_ = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read() assert isinstance(_A , _A ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype 
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = tmp_path / '''cache''' lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase_ = JsonDatasetReader(_A , cache_dir=_A , split=_A ).read() _check_json_dataset(_A , _A ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''' , [str, list] ) def __UpperCamelCase ( _A , _A , _A ): if issubclass(_A , _A ): lowerCAmelCase_ = jsonl_path elif issubclass(_A , _A ): lowerCAmelCase_ = [jsonl_path] lowerCAmelCase_ = tmp_path / '''cache''' lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase_ = JsonDatasetReader(_A , cache_dir=_A ).read() _check_json_dataset(_A , _A ) def __UpperCamelCase ( _A , _A , _A=("train",) ): assert isinstance(_A , _A ) for split in splits: lowerCAmelCase_ = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = tmp_path / '''cache''' lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCAmelCase_ = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=_A , keep_in_memory=_A ).read() _check_json_datasetdict(_A , _A ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = tmp_path / '''cache''' lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase_ = features.copy() if features else default_expected_features lowerCAmelCase_ = ( Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase_ = JsonDatasetReader({'''train''': jsonl_path} , features=_A , cache_dir=_A ).read() _check_json_datasetdict(_A , _A ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def __UpperCamelCase ( _A , _A , _A ): if split: lowerCAmelCase_ = {split: jsonl_path} else: lowerCAmelCase_ = '''train''' lowerCAmelCase_ = {'''train''': jsonl_path, '''test''': jsonl_path} lowerCAmelCase_ = tmp_path / '''cache''' lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase_ = JsonDatasetReader(_A , cache_dir=_A ).read() _check_json_datasetdict(_A , _A , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def __UpperCamelCase ( _A ): return json.load(_A ) def __UpperCamelCase ( _A ): return [json.loads(_A ) for line in buffer] class A : @pytest.mark.parametrize('''lines, load_json_function''', [(True, load_json_lines), (False, load_json)] ) def SCREAMING_SNAKE_CASE__ ( self, 
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ): """simple docstring""" with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, lines=UpperCamelCase__ ).write() buffer.seek(0 ) lowerCAmelCase_ = load_json_function(UpperCamelCase__ ) assert isinstance(UpperCamelCase__, UpperCamelCase__ ) assert isinstance(exported_content[0], UpperCamelCase__ ) assert len(UpperCamelCase__ ) == 10 @pytest.mark.parametrize( '''orient, container, keys, len_at''', [ ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None), ('''split''', dict, {'''columns''', '''data'''}, '''data'''), ('''index''', dict, set('''0123456789''' ), None), ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''), ('''values''', list, None, None), ('''table''', dict, {'''schema''', '''data'''}, '''data'''), ], ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ): """simple docstring""" with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, lines=UpperCamelCase__, orient=UpperCamelCase__ ).write() buffer.seek(0 ) lowerCAmelCase_ = load_json(UpperCamelCase__ ) assert isinstance(UpperCamelCase__, UpperCamelCase__ ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(UpperCamelCase__, '''keys''' ) and not hasattr(exported_content[0], '''keys''' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(UpperCamelCase__ ) == 10 @pytest.mark.parametrize('''lines, load_json_function''', [(True, load_json_lines), (False, load_json)] ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ): """simple docstring""" with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, lines=UpperCamelCase__, num_proc=2 ).write() buffer.seek(0 ) lowerCAmelCase_ = load_json_function(UpperCamelCase__ ) assert isinstance(UpperCamelCase__, UpperCamelCase__ ) assert isinstance(exported_content[0], UpperCamelCase__ ) assert len(UpperCamelCase__ ) == 10 @pytest.mark.parametrize( '''orient, container, keys, len_at''', [ ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None), ('''split''', dict, {'''columns''', '''data'''}, '''data'''), ('''index''', dict, set('''0123456789''' ), None), ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''), ('''values''', list, None, None), ('''table''', dict, {'''schema''', '''data'''}, '''data'''), ], ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ): """simple docstring""" with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, lines=UpperCamelCase__, orient=UpperCamelCase__, num_proc=2 ).write() buffer.seek(0 ) lowerCAmelCase_ = load_json(UpperCamelCase__ ) assert isinstance(UpperCamelCase__, UpperCamelCase__ ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(UpperCamelCase__, '''keys''' ) and not hasattr(exported_content[0], '''keys''' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(UpperCamelCase__ ) == 10 def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ): """simple docstring""" with pytest.raises(UpperCamelCase__ ): with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase__, 
UpperCamelCase__, num_proc=0 ) @pytest.mark.parametrize('''compression, extension''', [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ): """simple docstring""" lowerCAmelCase_ = tmp_path_factory.mktemp('''data''' ) / f"test.json.{extension}" lowerCAmelCase_ = str(shared_datadir / f"test_file.json.{extension}" ) JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, compression=UpperCamelCase__ ).write() with fsspec.open(UpperCamelCase__, '''rb''', compression='''infer''' ) as f: lowerCAmelCase_ = f.read() with fsspec.open(UpperCamelCase__, '''rb''', compression='''infer''' ) as f: lowerCAmelCase_ = f.read() assert exported_content == original_content
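# The round trip the tests above cover, in its simplest form: write a Dataset to
# in-memory JSON Lines and read the bytes back. Column names and values are
# illustrative.
import io

from datasets import Dataset
from datasets.io.json import JsonDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
with io.BytesIO() as buffer:
    JsonDatasetWriter(ds, buffer, lines=True).write()
    buffer.seek(0)
    print(buffer.read().decode("utf-8"))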
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product over all contiguous subarrays of ``numbers``."""
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            # a negative factor swaps the roles of the running max and min
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
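# Two quick checks for max_product_subarray: a sign flip mid-array and a zero that
# resets the running products.
print(max_product_subarray([2, 3, -2, 4]))  # 6, from the subarray [2, 3]
print(max_product_subarray([-2, 0, -1]))    # 0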
import importlib import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Union import torch from ..utils import BaseOutput _A = '''scheduler_config.json''' class A ( __UpperCAmelCase ): __snake_case = 1 __snake_case = 2 __snake_case = 3 __snake_case = 4 __snake_case = 5 __snake_case = 6 __snake_case = 7 __snake_case = 8 __snake_case = 9 __snake_case = 10 __snake_case = 11 __snake_case = 12 __snake_case = 13 __snake_case = 14 @dataclass class A ( __UpperCAmelCase ): __snake_case = 42 class A : __snake_case = SCHEDULER_CONFIG_NAME __snake_case = [] __snake_case = True @classmethod def SCREAMING_SNAKE_CASE__ ( cls, UpperCamelCase__ = None, UpperCamelCase__ = None, UpperCamelCase__=False, **UpperCamelCase__, ): """simple docstring""" lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = cls.load_config( pretrained_model_name_or_path=UpperCamelCase__, subfolder=UpperCamelCase__, return_unused_kwargs=UpperCamelCase__, return_commit_hash=UpperCamelCase__, **UpperCamelCase__, ) return cls.from_config(UpperCamelCase__, return_unused_kwargs=UpperCamelCase__, **UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = False, **UpperCamelCase__ ): """simple docstring""" self.save_config(save_directory=UpperCamelCase__, push_to_hub=UpperCamelCase__, **UpperCamelCase__ ) @property def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" return self._get_compatibles() @classmethod def SCREAMING_SNAKE_CASE__ ( cls ): """simple docstring""" lowerCAmelCase_ = list(set([cls.__name__] + cls._compatibles ) ) lowerCAmelCase_ = importlib.import_module(__name__.split('''.''' )[0] ) lowerCAmelCase_ = [ getattr(UpperCamelCase__, UpperCamelCase__ ) for c in compatible_classes_str if hasattr(UpperCamelCase__, UpperCamelCase__ ) ] return compatible_classes
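# How the mixin above is used in practice: concrete schedulers inherit
# from_pretrained/save_pretrained from it (method and property names are mangled in
# this dump; in the released library the compatibility list is exposed as
# ``scheduler.compatibles``). The checkpoint id is an assumption for illustration.
from diffusers import DDPMScheduler

scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256", subfolder="scheduler")
print(scheduler.compatibles)  # other scheduler classes this config can load into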
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort ``unsorted`` in place with a bidirectional bubble (cocktail shaker) sort."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        # backward pass: sink the smallest remaining value toward the front
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        # forward pass: float the largest remaining value toward the back
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
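# cocktail_shaker_sort sorts in place and returns the list; duplicates are kept.
print(cocktail_shaker_sort([4, 5, 2, 1, 2]))  # [1, 2, 2, 4, 5]
print(cocktail_shaker_sort([-4, 0, 3, -1]))   # [-4, -1, 0, 3]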
import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class A ( unittest.TestCase ): @property def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCAmelCase_ = UNetaDModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('''DownBlock2D''', '''AttnDownBlock2D'''), up_block_types=('''AttnUpBlock2D''', '''UpBlock2D'''), ) return model def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.dummy_uncond_unet lowerCAmelCase_ = KarrasVeScheduler() lowerCAmelCase_ = KarrasVePipeline(unet=UpperCamelCase__, scheduler=UpperCamelCase__ ) pipe.to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowerCAmelCase_ = torch.manual_seed(0 ) lowerCAmelCase_ = pipe(num_inference_steps=2, generator=UpperCamelCase__, output_type='''numpy''' ).images lowerCAmelCase_ = torch.manual_seed(0 ) lowerCAmelCase_ = pipe(num_inference_steps=2, generator=UpperCamelCase__, output_type='''numpy''', return_dict=UpperCamelCase__ )[0] lowerCAmelCase_ = image[0, -3:, -3:, -1] lowerCAmelCase_ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCAmelCase_ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class A ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = '''google/ncsnpp-celebahq-256''' lowerCAmelCase_ = UNetaDModel.from_pretrained(UpperCamelCase__ ) lowerCAmelCase_ = KarrasVeScheduler() lowerCAmelCase_ = KarrasVePipeline(unet=UpperCamelCase__, scheduler=UpperCamelCase__ ) pipe.to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowerCAmelCase_ = torch.manual_seed(0 ) lowerCAmelCase_ = pipe(num_inference_steps=20, generator=UpperCamelCase__, output_type='''numpy''' ).images lowerCAmelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) lowerCAmelCase_ = np.array([0.578, 0.5_811, 0.5_924, 0.5_809, 0.587, 0.5_886, 0.5_861, 0.5_802, 0.586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Image from .base import TaskTemplate @dataclass(frozen=__UpperCAmelCase ) class A ( __UpperCAmelCase ): __snake_case = field(default='image-classification' , metadata={'include_in_asdict_even_if_is_default': True} ) __snake_case = Features({'image': Image()} ) __snake_case = Features({'labels': ClassLabel} ) __snake_case = "image" __snake_case = "labels" def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ): """simple docstring""" if self.label_column not in features: raise ValueError(f"Column {self.label_column} is not present in features." ) if not isinstance(features[self.label_column], UpperCamelCase__ ): raise ValueError(f"Column {self.label_column} is not a ClassLabel." ) lowerCAmelCase_ = copy.deepcopy(self ) lowerCAmelCase_ = self.label_schema.copy() lowerCAmelCase_ = features[self.label_column] lowerCAmelCase_ = label_schema return task_template @property def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" return { self.image_column: "image", self.label_column: "labels", }
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
_A = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/''' def __UpperCamelCase ( _A ): # Make sure the supplied data is a bytes-like object if not isinstance(_A , _A ): lowerCAmelCase_ = f"a bytes-like object is required, not '{data.__class__.__name__}'" raise TypeError(_A ) lowerCAmelCase_ = ''''''.join(bin(_A )[2:].zfill(8 ) for byte in data ) lowerCAmelCase_ = len(_A ) % 6 != 0 if padding_needed: # The padding that will be added later lowerCAmelCase_ = b'''=''' * ((6 - len(_A ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(_A ) % 6) else: lowerCAmelCase_ = b'''''' # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(_A ) , 6 ) ).encode() + padding ) def __UpperCamelCase ( _A ): # Make sure encoded_data is either a string or a bytes-like object if not isinstance(_A , _A ) and not isinstance(_A , _A ): lowerCAmelCase_ = ( '''argument should be a bytes-like object or ASCII string, ''' f"not '{encoded_data.__class__.__name__}'" ) raise TypeError(_A ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(_A , _A ): try: lowerCAmelCase_ = encoded_data.decode('''utf-8''' ) except UnicodeDecodeError: raise ValueError('''base64 encoded data should only contain ASCII characters''' ) lowerCAmelCase_ = encoded_data.count('''=''' ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(_A ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one lowerCAmelCase_ = encoded_data[:-padding] lowerCAmelCase_ = ''''''.join( bin(B64_CHARSET.index(_A ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: lowerCAmelCase_ = ''''''.join( bin(B64_CHARSET.index(_A ) )[2:].zfill(6 ) for char in encoded_data ) lowerCAmelCase_ = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(_A ) , 8 ) ] return bytes(_A ) if __name__ == "__main__": import doctest doctest.testmod()
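# Sanity check for the encoder/decoder above against the standard library. The names
# base64_encode and base64_decode are assumptions: the def names are mangled in this
# dump, but the logic matches a from-scratch Base64 implementation, so the output
# should agree with the stdlib byte for byte.
import base64

data = b"Hello, World!"
encoded = base64_encode(data)  # hypothetical name for the first function above
assert encoded == base64.b64encode(data)
assert base64_decode(encoded) == data  # hypothetical name for the second function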
import math

import numpy as np
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits: int = 3) -> dict:
    """Build a QFT circuit on ``number_of_qubits`` qubits and return simulated counts."""
    if not isinstance(number_of_qubits, int):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits
    for i in range(number_of_qubits):
        # Hadamard on the current qubit, then controlled phase rotations below it
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    # reverse the qubit order with swaps
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)
    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(
        f"Total count for quantum fourier transform state is: "
        f"{quantum_fourier_transform(3)}"
    )
import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) _A = logging.getLogger(__name__) @dataclass class A : __snake_case = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) __snake_case = field( default=__UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) __snake_case = field( default=__UpperCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) __snake_case = field( default=__UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) __snake_case = field(default=__UpperCAmelCase , metadata={'help': 'Whether tp freeze the encoder.'} ) __snake_case = field(default=__UpperCAmelCase , metadata={'help': 'Whether to freeze the embeddings.'} ) @dataclass class A : __snake_case = field( metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} ) __snake_case = field( default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , ) __snake_case = field( default=1024 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) __snake_case = field( default=128 , metadata={ 'help': ( 'The maximum total sequence length for target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) __snake_case = field( default=142 , metadata={ 'help': ( 'The maximum total sequence length for validation target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded. ' 'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used ' 'during ``evaluate`` and ``predict``.' ) } , ) __snake_case = field( default=142 , metadata={ 'help': ( 'The maximum total sequence length for test target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) __snake_case = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} ) __snake_case = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} ) __snake_case = field(default=-1 , metadata={'help': '# test examples. 
-1 means use all.'} ) __snake_case = field(default=__UpperCAmelCase , metadata={'help': 'Source language id for translation.'} ) __snake_case = field(default=__UpperCAmelCase , metadata={'help': 'Target language id for translation.'} ) __snake_case = field(default=__UpperCAmelCase , metadata={'help': '# num_beams to use for evaluation.'} ) __snake_case = field( default=__UpperCAmelCase , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , ) def __UpperCamelCase ( _A , _A , _A ): logger.info(f"***** {split} metrics *****" ) for key in sorted(metrics.keys() ): logger.info(f" {key} = {metrics[key]}" ) save_json(_A , os.path.join(_A , f"{split}_results.json" ) ) def __UpperCamelCase ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowerCAmelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = parser.parse_args_into_dataclasses() check_output_dir(_A ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info('''Training/evaluation parameters %s''' , _A ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
lowerCAmelCase_ = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) lowerCAmelCase_ = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(_A , _A , _A ): assert hasattr(_A , _A ), f"({config.__class__.__name__}) doesn't have a `{p}` attribute" setattr(_A , _A , getattr(_A , _A ) ) lowerCAmelCase_ = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) lowerCAmelCase_ = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=_A , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(_A , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: lowerCAmelCase_ = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(_A , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(_A , _A ): lowerCAmelCase_ = tokenizer.lang_code_to_id[data_args.tgt_lang] else: lowerCAmelCase_ = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(_A ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) lowerCAmelCase_ = SeqaSeqDataset # Get datasets lowerCAmelCase_ = ( dataset_class( _A , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_train else None ) lowerCAmelCase_ = ( dataset_class( _A , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) lowerCAmelCase_ = ( dataset_class( _A , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_predict else None ) # Initialize our Trainer lowerCAmelCase_ = ( build_compute_metrics_fn(data_args.task , _A ) if training_args.predict_with_generate else None ) lowerCAmelCase_ = SeqaSeqTrainer( model=_A , args=_A , data_args=_A , train_dataset=_A , eval_dataset=_A , data_collator=SeqaSeqDataCollator( _A , _A , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_A , tokenizer=_A , ) lowerCAmelCase_ = {} # Training if training_args.do_train: logger.info('''*** Train ***''' ) lowerCAmelCase_ = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) lowerCAmelCase_ = train_result.metrics lowerCAmelCase_ = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics('''train''' , _A , training_args.output_dir ) all_metrics.update(_A ) # Need to save the state, since Trainer.save_model saves only the tokenizer with 
the model trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info('''*** Evaluate ***''' ) lowerCAmelCase_ = trainer.evaluate(metric_key_prefix='''val''' ) lowerCAmelCase_ = data_args.n_val lowerCAmelCase_ = round(metrics['''val_loss'''] , 4 ) if trainer.is_world_process_zero(): handle_metrics('''val''' , _A , training_args.output_dir ) all_metrics.update(_A ) if training_args.do_predict: logger.info('''*** Predict ***''' ) lowerCAmelCase_ = trainer.predict(test_dataset=_A , metric_key_prefix='''test''' ) lowerCAmelCase_ = test_output.metrics lowerCAmelCase_ = data_args.n_test if trainer.is_world_process_zero(): lowerCAmelCase_ = round(metrics['''test_loss'''] , 4 ) handle_metrics('''test''' , _A , training_args.output_dir ) all_metrics.update(_A ) if training_args.predict_with_generate: lowerCAmelCase_ = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=_A , clean_up_tokenization_spaces=_A ) lowerCAmelCase_ = lmap(str.strip , _A ) write_txt_file(_A , os.path.join(training_args.output_dir , '''test_generations.txt''' ) ) if trainer.is_world_process_zero(): save_json(_A , os.path.join(training_args.output_dir , '''all_results.json''' ) ) return all_metrics def __UpperCamelCase ( _A ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
from functools import lru_cache


@lru_cache
def factorial(num: int) -> int:
    """Return num! for a non-negative integer, memoizing results with lru_cache."""
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
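# Usage: the cache makes repeated and recursive calls effectively free.
print(factorial(5))   # 120
print(factorial(10))  # 3628800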
import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class A ( __UpperCAmelCase ): __snake_case = (UnCLIPScheduler,) def SCREAMING_SNAKE_CASE__ ( self, **UpperCamelCase__ ): """simple docstring""" lowerCAmelCase_ = { '''num_train_timesteps''': 1000, '''variance_type''': '''fixed_small_log''', '''clip_sample''': True, '''clip_sample_range''': 1.0, '''prediction_type''': '''epsilon''', } config.update(**UpperCamelCase__ ) return config def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" for time_step in [0, 500, 999]: for prev_timestep in [None, 5, 100, 250, 500, 750]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=UpperCamelCase__, prev_timestep=UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.scheduler_classes[0] lowerCAmelCase_ = self.get_scheduler_config(variance_type='''fixed_small_log''' ) lowerCAmelCase_ = scheduler_class(**UpperCamelCase__ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1E-5 def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.scheduler_classes[0] lowerCAmelCase_ = self.get_scheduler_config(variance_type='''learned_range''' ) lowerCAmelCase_ = scheduler_class(**UpperCamelCase__ ) lowerCAmelCase_ = 0.5 assert scheduler._get_variance(1, predicted_variance=UpperCamelCase__ ) - -10.1_712_790 < 1E-5 assert scheduler._get_variance(487, predicted_variance=UpperCamelCase__ ) - -5.7_998_052 < 1E-5 assert scheduler._get_variance(999, predicted_variance=UpperCamelCase__ ) - -0.0_010_011 < 1E-5 def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.scheduler_classes[0] lowerCAmelCase_ = self.get_scheduler_config() lowerCAmelCase_ = scheduler_class(**UpperCamelCase__ ) lowerCAmelCase_ = scheduler.timesteps lowerCAmelCase_ = self.dummy_model() lowerCAmelCase_ = self.dummy_sample_deter lowerCAmelCase_ = torch.manual_seed(0 ) for i, t in enumerate(UpperCamelCase__ ): # 1. predict noise residual lowerCAmelCase_ = model(UpperCamelCase__, UpperCamelCase__ ) # 2. 
predict previous mean of sample x_t-1 lowerCAmelCase_ = scheduler.step(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, generator=UpperCamelCase__ ).prev_sample lowerCAmelCase_ = pred_prev_sample lowerCAmelCase_ = torch.sum(torch.abs(UpperCamelCase__ ) ) lowerCAmelCase_ = torch.mean(torch.abs(UpperCamelCase__ ) ) assert abs(result_sum.item() - 252.2_682_495 ) < 1E-2 assert abs(result_mean.item() - 0.3_284_743 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.scheduler_classes[0] lowerCAmelCase_ = self.get_scheduler_config() lowerCAmelCase_ = scheduler_class(**UpperCamelCase__ ) scheduler.set_timesteps(25 ) lowerCAmelCase_ = scheduler.timesteps lowerCAmelCase_ = self.dummy_model() lowerCAmelCase_ = self.dummy_sample_deter lowerCAmelCase_ = torch.manual_seed(0 ) for i, t in enumerate(UpperCamelCase__ ): # 1. predict noise residual lowerCAmelCase_ = model(UpperCamelCase__, UpperCamelCase__ ) if i + 1 == timesteps.shape[0]: lowerCAmelCase_ = None else: lowerCAmelCase_ = timesteps[i + 1] # 2. predict previous mean of sample x_t-1 lowerCAmelCase_ = scheduler.step( UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, prev_timestep=UpperCamelCase__, generator=UpperCamelCase__ ).prev_sample lowerCAmelCase_ = pred_prev_sample lowerCAmelCase_ = torch.sum(torch.abs(UpperCamelCase__ ) ) lowerCAmelCase_ = torch.mean(torch.abs(UpperCamelCase__ ) ) assert abs(result_sum.item() - 258.2_044_983 ) < 1E-2 assert abs(result_mean.item() - 0.3_362_038 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" pass
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def __UpperCamelCase ( _A ): lowerCAmelCase_ = 384 lowerCAmelCase_ = 7 if "tiny" in model_name: lowerCAmelCase_ = 96 lowerCAmelCase_ = (2, 2, 6, 2) lowerCAmelCase_ = (3, 6, 12, 24) elif "small" in model_name: lowerCAmelCase_ = 96 lowerCAmelCase_ = (2, 2, 18, 2) lowerCAmelCase_ = (3, 6, 12, 24) elif "base" in model_name: lowerCAmelCase_ = 128 lowerCAmelCase_ = (2, 2, 18, 2) lowerCAmelCase_ = (4, 8, 16, 32) lowerCAmelCase_ = 12 lowerCAmelCase_ = 512 elif "large" in model_name: lowerCAmelCase_ = 192 lowerCAmelCase_ = (2, 2, 18, 2) lowerCAmelCase_ = (6, 12, 24, 48) lowerCAmelCase_ = 12 lowerCAmelCase_ = 768 # set label information lowerCAmelCase_ = 150 lowerCAmelCase_ = '''huggingface/label-files''' lowerCAmelCase_ = '''ade20k-id2label.json''' lowerCAmelCase_ = json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) ) lowerCAmelCase_ = {int(_A ): v for k, v in idalabel.items()} lowerCAmelCase_ = {v: k for k, v in idalabel.items()} lowerCAmelCase_ = SwinConfig( embed_dim=_A , depths=_A , num_heads=_A , window_size=_A , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , ) lowerCAmelCase_ = UperNetConfig( backbone_config=_A , auxiliary_in_channels=_A , num_labels=_A , idalabel=_A , labelaid=_A , ) return config def __UpperCamelCase ( _A ): lowerCAmelCase_ = [] # fmt: off # stem rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight''') ) rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias''') ) rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight''') ) rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias''') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") ) 
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias") ) if i < 3: rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight") ) rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight") ) rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias") ) rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight") ) rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias") ) # decode head rename_keys.extend( [ ('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''), ('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''), ('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''), ('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''), ] ) # fmt: on return rename_keys def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = dct.pop(_A ) lowerCAmelCase_ = val def __UpperCamelCase ( _A , _A ): lowerCAmelCase_ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): lowerCAmelCase_ = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) lowerCAmelCase_ = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight" ) lowerCAmelCase_ = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict lowerCAmelCase_ = in_proj_weight[:dim, :] lowerCAmelCase_ = in_proj_bias[: dim] lowerCAmelCase_ = in_proj_weight[ dim : dim * 2, : ] lowerCAmelCase_ = in_proj_bias[ dim : dim * 2 ] lowerCAmelCase_ = in_proj_weight[ -dim :, : ] lowerCAmelCase_ = in_proj_bias[-dim :] # fmt: on def __UpperCamelCase ( _A ): lowerCAmelCase_ , lowerCAmelCase_ = x.shape lowerCAmelCase_ = x.reshape(_A , 4 , in_channel // 4 ) lowerCAmelCase_ = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(_A , _A ) return x def __UpperCamelCase ( _A ): lowerCAmelCase_ , lowerCAmelCase_ = x.shape lowerCAmelCase_ = x.reshape(_A , in_channel // 4 , 4 ) lowerCAmelCase_ = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(_A , _A ) return x def __UpperCamelCase ( _A ): lowerCAmelCase_ = x.shape[0] lowerCAmelCase_ = x.reshape(4 , in_channel // 4 ) lowerCAmelCase_ = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(_A ) return x def __UpperCamelCase ( _A ): lowerCAmelCase_ = x.shape[0] lowerCAmelCase_ = x.reshape(in_channel // 4 , 4 ) lowerCAmelCase_ = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(_A ) return x def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = { '''upernet-swin-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth''', '''upernet-swin-small''': 
'''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth''', '''upernet-swin-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth''', '''upernet-swin-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth''', } lowerCAmelCase_ = model_name_to_url[model_name] lowerCAmelCase_ = torch.hub.load_state_dict_from_url(_A , map_location='''cpu''' , file_name=_A )[ '''state_dict''' ] for name, param in state_dict.items(): print(_A , param.shape ) lowerCAmelCase_ = get_upernet_config(_A ) lowerCAmelCase_ = UperNetForSemanticSegmentation(_A ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): lowerCAmelCase_ = state_dict.pop(_A ) if "bn" in key: lowerCAmelCase_ = key.replace('''bn''' , '''batch_norm''' ) lowerCAmelCase_ = val # rename keys lowerCAmelCase_ = create_rename_keys(_A ) for src, dest in rename_keys: rename_key(_A , _A , _A ) read_in_q_k_v(_A , config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: lowerCAmelCase_ = reverse_correct_unfold_reduction_order(_A ) if "norm" in key: lowerCAmelCase_ = reverse_correct_unfold_norm_order(_A ) model.load_state_dict(_A ) # verify on image lowerCAmelCase_ = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg''' lowerCAmelCase_ = Image.open(requests.get(_A , stream=_A ).raw ).convert('''RGB''' ) lowerCAmelCase_ = SegformerImageProcessor() lowerCAmelCase_ = processor(_A , return_tensors='''pt''' ).pixel_values with torch.no_grad(): lowerCAmelCase_ = model(_A ) lowerCAmelCase_ = outputs.logits print(logits.shape ) print('''First values of logits:''' , logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": lowerCAmelCase_ = torch.tensor( [[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] ) elif model_name == "upernet-swin-small": lowerCAmelCase_ = torch.tensor( [[-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.0_9_0_8, -7.0_9_0_8, -6.8_5_3_4]] ) elif model_name == "upernet-swin-base": lowerCAmelCase_ = torch.tensor( [[-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.4_7_6_3, -6.4_7_6_3, -6.3_2_5_4]] ) elif model_name == "upernet-swin-large": lowerCAmelCase_ = torch.tensor( [[-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.4_0_4_4, -7.4_0_4_4, -7.2_5_8_6]] ) print('''Logits:''' , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , _A , atol=1E-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(f"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_A ) print(f"Saving processor to {pytorch_dump_folder_path}" ) processor.save_pretrained(_A ) if push_to_hub: print(f"Pushing model and processor for {model_name} to hub" ) model.push_to_hub(f"openmmlab/{model_name}" ) 
processor.push_to_hub(f"openmmlab/{model_name}" ) if __name__ == "__main__": _A = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''upernet-swin-tiny''', type=str, choices=[f"upernet-swin-{size}" for size in ['''tiny''', '''small''', '''base''', '''large''']], help='''Name of the Swin + UperNet model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) _A = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
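# Once converted and pushed, the checkpoint is consumed through the standard
# transformers API; the hub id matches the script's push_to_hub target for the
# "upernet-swin-tiny" choice.
from transformers import SegformerImageProcessor, UperNetForSemanticSegmentation

processor = SegformerImageProcessor()
model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny")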
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _A = logging.get_logger(__name__) _A = { '''facebook/xmod-base''': '''https://huggingface.co/facebook/xmod-base/resolve/main/config.json''', '''facebook/xmod-large-prenorm''': '''https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json''', '''facebook/xmod-base-13-125k''': '''https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json''', '''facebook/xmod-base-30-125k''': '''https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json''', '''facebook/xmod-base-30-195k''': '''https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json''', '''facebook/xmod-base-60-125k''': '''https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json''', '''facebook/xmod-base-60-265k''': '''https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json''', '''facebook/xmod-base-75-125k''': '''https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json''', '''facebook/xmod-base-75-269k''': '''https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json''', } class A ( __UpperCAmelCase ): __snake_case = 'xmod' def __init__( self, UpperCamelCase__=3_0522, UpperCamelCase__=768, UpperCamelCase__=12, UpperCamelCase__=12, UpperCamelCase__=3072, UpperCamelCase__="gelu", UpperCamelCase__=0.1, UpperCamelCase__=0.1, UpperCamelCase__=512, UpperCamelCase__=2, UpperCamelCase__=0.02, UpperCamelCase__=1E-12, UpperCamelCase__=1, UpperCamelCase__=0, UpperCamelCase__=2, UpperCamelCase__="absolute", UpperCamelCase__=True, UpperCamelCase__=None, UpperCamelCase__=False, UpperCamelCase__=2, UpperCamelCase__=False, UpperCamelCase__=True, UpperCamelCase__=True, UpperCamelCase__=("en_XX",), UpperCamelCase__=None, **UpperCamelCase__, ): """simple docstring""" super().__init__(pad_token_id=UpperCamelCase__, bos_token_id=UpperCamelCase__, eos_token_id=UpperCamelCase__, **UpperCamelCase__ ) lowerCAmelCase_ = vocab_size lowerCAmelCase_ = hidden_size lowerCAmelCase_ = num_hidden_layers lowerCAmelCase_ = num_attention_heads lowerCAmelCase_ = hidden_act lowerCAmelCase_ = intermediate_size lowerCAmelCase_ = hidden_dropout_prob lowerCAmelCase_ = attention_probs_dropout_prob lowerCAmelCase_ = max_position_embeddings lowerCAmelCase_ = type_vocab_size lowerCAmelCase_ = initializer_range lowerCAmelCase_ = layer_norm_eps lowerCAmelCase_ = position_embedding_type lowerCAmelCase_ = use_cache lowerCAmelCase_ = classifier_dropout lowerCAmelCase_ = pre_norm lowerCAmelCase_ = adapter_reduction_factor lowerCAmelCase_ = adapter_layer_norm lowerCAmelCase_ = adapter_reuse_layer_norm lowerCAmelCase_ = ln_before_adapter lowerCAmelCase_ = list(UpperCamelCase__ ) lowerCAmelCase_ = default_language class A ( __UpperCAmelCase ): @property def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" if self.task == "multiple-choice": lowerCAmelCase_ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: lowerCAmelCase_ = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
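# Instantiating the X-MOD configuration above; XmodModel is an assumption (this file
# defines only the config and its ONNX axes).
from transformers import XmodConfig, XmodModel

config = XmodConfig(languages=("en_XX", "de_DE"))
model = XmodModel(config)
print(config.default_language)  # None unless passed explicitly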
import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def __UpperCamelCase ( _A , _A ): lowerCAmelCase_ = args.log_outputs lowerCAmelCase_ = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] ) # load metric lowerCAmelCase_ = load_metric('''wer''' ) lowerCAmelCase_ = load_metric('''cer''' ) # compute metrics lowerCAmelCase_ = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] ) lowerCAmelCase_ = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] ) # print & log results lowerCAmelCase_ = f"WER: {wer_result}\nCER: {cer_result}" print(_A ) with open(f"{dataset_id}_eval_results.txt" , '''w''' ) as f: f.write(_A ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: lowerCAmelCase_ = f"log_{dataset_id}_predictions.txt" lowerCAmelCase_ = f"log_{dataset_id}_targets.txt" with open(_A , '''w''' ) as p, open(_A , '''w''' ) as t: # mapping function to write output def write_to_file(_A , _A ): p.write(f"{i}" + '''\n''' ) p.write(batch['''prediction'''] + '''\n''' ) t.write(f"{i}" + '''\n''' ) t.write(batch['''target'''] + '''\n''' ) result.map(_A , with_indices=_A ) def __UpperCamelCase ( _A ): lowerCAmelCase_ = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training lowerCAmelCase_ = re.sub(_A , '''''' , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! lowerCAmelCase_ = ['''\n\n''', '''\n''', ''' ''', ''' '''] for t in token_sequences_to_ignore: lowerCAmelCase_ = ''' '''.join(text.split(_A ) ) return text def __UpperCamelCase ( _A ): # load dataset lowerCAmelCase_ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=_A ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained(args.model_id ) lowerCAmelCase_ = feature_extractor.sampling_rate # resample audio lowerCAmelCase_ = dataset.cast_column('''audio''' , Audio(sampling_rate=_A ) ) # load eval pipeline if args.device is None: lowerCAmelCase_ = 0 if torch.cuda.is_available() else -1 lowerCAmelCase_ = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(_A ): lowerCAmelCase_ = asr( batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) lowerCAmelCase_ = prediction['''text'''] lowerCAmelCase_ = normalize_text(batch['''sentence'''] ) return batch # run inference on all examples lowerCAmelCase_ = dataset.map(_A , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(_A , _A ) if __name__ == "__main__": _A = argparse.ArgumentParser() parser.add_argument( '''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers''' ) parser.add_argument( '''--dataset''', type=str, required=True, help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''', ) parser.add_argument( '''--config''', type=str, required=True, help='''Config of the dataset. 
*E.g.* `\'en\'` for Common Voice''' ) parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''') parser.add_argument( '''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.''' ) parser.add_argument( '''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.''' ) parser.add_argument( '''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.''' ) parser.add_argument( '''--device''', type=int, default=None, help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''', ) _A = parser.parse_args() main(args)
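# A minimal invocation sketch for the evaluation script above (the script file
# name, model id, and dataset id here are illustrative assumptions, not taken
# from the original file; the flags come from its argparse block):
#
#   python eval.py --model_id <your-wav2vec2-checkpoint> \
#       --dataset mozilla-foundation/common_voice_8_0 --config en --split test \
#       --chunk_length_s 5.0 --stride_length_s 1.0 --log_outputs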
import argparse

import torch

from transformers import GPT2LMHeadModel, RobertaForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[param_name] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head ###s
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[layer] = state_dict[f"{layer}"]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
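# A hedged usage sketch for the extraction script above (the script file name
# is an assumption; the flags come from its own argparse definitions):
#
#   python extract.py --model_type roberta --model_name roberta-large \
#       --dump_checkpoint serialization_dir/tf_roberta_048131723.pth \
#       --vocab_transform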
from typing import Dict, List, Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _A = logging.get_logger(__name__) _A = { '''nielsr/canine-s''': 2_048, } # Unicode defines 1,114,112 total “codepoints” _A = 1_114_112 # Below: Constants defining canonical codepoints for special, pseudo-characters. # Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py _A = 0 _A = 0xe0_00 _A = 0xe0_01 _A = 0xe0_02 _A = 0xe0_03 _A = 0xe0_04 # Maps special codepoints to human-readable names. _A = { # Special symbols are represented using codepoints values that are valid, # but designated as "Private Use", meaning that they will never be assigned # characters by the Unicode Consortium, and are thus safe for use here. # # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly # excluded and should fail with a hard error. CLS: "[CLS]", SEP: "[SEP]", BOS: "[BOS]", MASK: "[MASK]", PAD: "[PAD]", RESERVED: "[RESERVED]", } # Maps special codepoint human-readable names to their codepoint values. _A = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()} class A ( __UpperCAmelCase ): __snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self, UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=False, UpperCamelCase__=2048, **UpperCamelCase__, ): """simple docstring""" lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else bos_token lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else eos_token lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else sep_token lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else cls_token lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else pad_token # Mask token behave like a normal word, i.e. include the space before it lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else mask_token super().__init__( bos_token=UpperCamelCase__, eos_token=UpperCamelCase__, sep_token=UpperCamelCase__, cls_token=UpperCamelCase__, pad_token=UpperCamelCase__, mask_token=UpperCamelCase__, add_prefix_space=UpperCamelCase__, model_max_length=UpperCamelCase__, **UpperCamelCase__, ) # Creates a mapping for looking up the IDs of special symbols. lowerCAmelCase_ = {} for codepoint, name in SPECIAL_CODEPOINTS.items(): lowerCAmelCase_ = codepoint # Creates a mapping for looking up the string forms of special symbol IDs. 
lowerCAmelCase_ = { codepoint: name for name, codepoint in self._special_codepoints.items() } lowerCAmelCase_ = UNICODE_VOCAB_SIZE lowerCAmelCase_ = len(self._special_codepoints ) @property def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" return self._unicode_vocab_size def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ): """simple docstring""" return list(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ): """simple docstring""" try: return ord(UpperCamelCase__ ) except TypeError: raise ValueError(f"invalid token: '{token}'" ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ): """simple docstring""" try: if index in SPECIAL_CODEPOINTS: return SPECIAL_CODEPOINTS[index] return chr(UpperCamelCase__ ) except TypeError: raise ValueError(f"invalid id: {index}" ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ): """simple docstring""" return "".join(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None ): """simple docstring""" lowerCAmelCase_ = [self.sep_token_id] lowerCAmelCase_ = [self.cls_token_id] lowerCAmelCase_ = cls + token_ids_a + sep if token_ids_a is not None: result += token_ids_a + sep return result def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None, UpperCamelCase__ = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase__, token_ids_a=UpperCamelCase__, already_has_special_tokens=UpperCamelCase__ ) lowerCAmelCase_ = [1] + ([0] * len(UpperCamelCase__ )) + [1] if token_ids_a is not None: result += ([0] * len(UpperCamelCase__ )) + [1] return result def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None ): """simple docstring""" lowerCAmelCase_ = [self.sep_token_id] lowerCAmelCase_ = [self.cls_token_id] lowerCAmelCase_ = len(cls + token_ids_a + sep ) * [0] if token_ids_a is not None: result += len(token_ids_a + sep ) * [1] return result def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None ): """simple docstring""" return ()
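# A minimal usage sketch for the character tokenizer above, assuming the class
# is exposed under its public name CanineTokenizer (an assumption here; the
# behavior follows directly from the methods defined above): every character is
# its own token, and its id is its Unicode codepoint.
#
#   tokenizer = CanineTokenizer()
#   ids = tokenizer("hi")["input_ids"]
#   # -> [CLS, ord("h"), ord("i"), SEP] == [0xE000, 104, 105, 0xE001]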
from ..utils import DummyObject, requires_backends


class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
def solution(limit: int = 1_000_000) -> int:
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, limit):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter

    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
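# Worked example for the solver above: the chain for 13 is
# 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1 (10 terms), so
# counters[13] == 10 once it has been memoized; with the default limit of one
# million the longest chain starts at 837799.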
def solution(limit: int = 1_000_000) -> int:
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
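# The routine above applies Euler's product formula with a prime sieve:
# phi(n) = n * prod(1 - 1/p) over the distinct primes p dividing n, e.g.
# phi(12) = 12 * (1 - 1/2) * (1 - 1/3) = 4, and sum(phi(d)) for d in 2..limit
# counts the reduced proper fractions with denominator <= limit.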
import unittest

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device


if is_torch_available():
    import torch

    from transformers import AutoModelForImageClassification

if is_vision_available():
    from transformers import AutoImageProcessor


@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        logits = outputs.logits
        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def __UpperCamelCase ( _A ): lowerCAmelCase_ = 384 lowerCAmelCase_ = 7 if "tiny" in model_name: lowerCAmelCase_ = 96 lowerCAmelCase_ = (2, 2, 6, 2) lowerCAmelCase_ = (3, 6, 12, 24) elif "small" in model_name: lowerCAmelCase_ = 96 lowerCAmelCase_ = (2, 2, 18, 2) lowerCAmelCase_ = (3, 6, 12, 24) elif "base" in model_name: lowerCAmelCase_ = 128 lowerCAmelCase_ = (2, 2, 18, 2) lowerCAmelCase_ = (4, 8, 16, 32) lowerCAmelCase_ = 12 lowerCAmelCase_ = 512 elif "large" in model_name: lowerCAmelCase_ = 192 lowerCAmelCase_ = (2, 2, 18, 2) lowerCAmelCase_ = (6, 12, 24, 48) lowerCAmelCase_ = 12 lowerCAmelCase_ = 768 # set label information lowerCAmelCase_ = 150 lowerCAmelCase_ = '''huggingface/label-files''' lowerCAmelCase_ = '''ade20k-id2label.json''' lowerCAmelCase_ = json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) ) lowerCAmelCase_ = {int(_A ): v for k, v in idalabel.items()} lowerCAmelCase_ = {v: k for k, v in idalabel.items()} lowerCAmelCase_ = SwinConfig( embed_dim=_A , depths=_A , num_heads=_A , window_size=_A , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , ) lowerCAmelCase_ = UperNetConfig( backbone_config=_A , auxiliary_in_channels=_A , num_labels=_A , idalabel=_A , labelaid=_A , ) return config def __UpperCamelCase ( _A ): lowerCAmelCase_ = [] # fmt: off # stem rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight''') ) rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias''') ) rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight''') ) rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias''') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") ) 
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias") ) if i < 3: rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight") ) rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight") ) rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias") ) rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight") ) rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias") ) # decode head rename_keys.extend( [ ('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''), ('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''), ('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''), ('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''), ] ) # fmt: on return rename_keys def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = dct.pop(_A ) lowerCAmelCase_ = val def __UpperCamelCase ( _A , _A ): lowerCAmelCase_ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): lowerCAmelCase_ = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) lowerCAmelCase_ = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight" ) lowerCAmelCase_ = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict lowerCAmelCase_ = in_proj_weight[:dim, :] lowerCAmelCase_ = in_proj_bias[: dim] lowerCAmelCase_ = in_proj_weight[ dim : dim * 2, : ] lowerCAmelCase_ = in_proj_bias[ dim : dim * 2 ] lowerCAmelCase_ = in_proj_weight[ -dim :, : ] lowerCAmelCase_ = in_proj_bias[-dim :] # fmt: on def __UpperCamelCase ( _A ): lowerCAmelCase_ , lowerCAmelCase_ = x.shape lowerCAmelCase_ = x.reshape(_A , 4 , in_channel // 4 ) lowerCAmelCase_ = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(_A , _A ) return x def __UpperCamelCase ( _A ): lowerCAmelCase_ , lowerCAmelCase_ = x.shape lowerCAmelCase_ = x.reshape(_A , in_channel // 4 , 4 ) lowerCAmelCase_ = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(_A , _A ) return x def __UpperCamelCase ( _A ): lowerCAmelCase_ = x.shape[0] lowerCAmelCase_ = x.reshape(4 , in_channel // 4 ) lowerCAmelCase_ = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(_A ) return x def __UpperCamelCase ( _A ): lowerCAmelCase_ = x.shape[0] lowerCAmelCase_ = x.reshape(in_channel // 4 , 4 ) lowerCAmelCase_ = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(_A ) return x def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = { '''upernet-swin-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth''', '''upernet-swin-small''': 
'''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth''', '''upernet-swin-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth''', '''upernet-swin-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth''', } lowerCAmelCase_ = model_name_to_url[model_name] lowerCAmelCase_ = torch.hub.load_state_dict_from_url(_A , map_location='''cpu''' , file_name=_A )[ '''state_dict''' ] for name, param in state_dict.items(): print(_A , param.shape ) lowerCAmelCase_ = get_upernet_config(_A ) lowerCAmelCase_ = UperNetForSemanticSegmentation(_A ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): lowerCAmelCase_ = state_dict.pop(_A ) if "bn" in key: lowerCAmelCase_ = key.replace('''bn''' , '''batch_norm''' ) lowerCAmelCase_ = val # rename keys lowerCAmelCase_ = create_rename_keys(_A ) for src, dest in rename_keys: rename_key(_A , _A , _A ) read_in_q_k_v(_A , config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: lowerCAmelCase_ = reverse_correct_unfold_reduction_order(_A ) if "norm" in key: lowerCAmelCase_ = reverse_correct_unfold_norm_order(_A ) model.load_state_dict(_A ) # verify on image lowerCAmelCase_ = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg''' lowerCAmelCase_ = Image.open(requests.get(_A , stream=_A ).raw ).convert('''RGB''' ) lowerCAmelCase_ = SegformerImageProcessor() lowerCAmelCase_ = processor(_A , return_tensors='''pt''' ).pixel_values with torch.no_grad(): lowerCAmelCase_ = model(_A ) lowerCAmelCase_ = outputs.logits print(logits.shape ) print('''First values of logits:''' , logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": lowerCAmelCase_ = torch.tensor( [[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] ) elif model_name == "upernet-swin-small": lowerCAmelCase_ = torch.tensor( [[-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.0_9_0_8, -7.0_9_0_8, -6.8_5_3_4]] ) elif model_name == "upernet-swin-base": lowerCAmelCase_ = torch.tensor( [[-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.4_7_6_3, -6.4_7_6_3, -6.3_2_5_4]] ) elif model_name == "upernet-swin-large": lowerCAmelCase_ = torch.tensor( [[-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.4_0_4_4, -7.4_0_4_4, -7.2_5_8_6]] ) print('''Logits:''' , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , _A , atol=1E-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(f"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_A ) print(f"Saving processor to {pytorch_dump_folder_path}" ) processor.save_pretrained(_A ) if push_to_hub: print(f"Pushing model and processor for {model_name} to hub" ) model.push_to_hub(f"openmmlab/{model_name}" ) 
processor.push_to_hub(f"openmmlab/{model_name}" ) if __name__ == "__main__": _A = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''upernet-swin-tiny''', type=str, choices=[f"upernet-swin-{size}" for size in ['''tiny''', '''small''', '''base''', '''large''']], help='''Name of the Swin + UperNet model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) _A = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import argparse import os import re import torch from flax.traverse_util import flatten_dict from tax import checkpoints from transformers import ( AutoTokenizer, PixaStructConfig, PixaStructForConditionalGeneration, PixaStructImageProcessor, PixaStructProcessor, PixaStructTextConfig, PixaStructVisionConfig, ) def __UpperCamelCase ( _A ): lowerCAmelCase_ = checkpoints.load_tax_checkpoint(_A ) lowerCAmelCase_ = flatten_dict(_A ) return flax_params def __UpperCamelCase ( _A ): lowerCAmelCase_ = {} lowerCAmelCase_ = { '''token_embedder''': '''embeddings''', '''encoder_norm''': '''layernorm''', '''kernel''': '''weight''', '''.out''': '''.output''', '''scale''': '''weight''', '''embedders_0.pos_embedding''': '''row_embedder.weight''', '''embedders_1.pos_embedding''': '''column_embedder.weight''', } lowerCAmelCase_ = { '''query''': '''attention.query''', '''key''': '''attention.key''', '''value''': '''attention.value''', '''output.dense''': '''output''', '''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''', '''pre_self_attention_layer_norm''': '''self_attention.layer_norm''', '''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''', '''mlp.''': '''mlp.DenseReluDense.''', '''pre_mlp_layer_norm''': '''mlp.layer_norm''', '''self_attention.o''': '''self_attention.attention.o''', '''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''', '''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''', '''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''', '''decoder.logits_dense.weight''': '''decoder.lm_head.weight''', } for key in flax_dict.keys(): if "target" in key: # remove the first prefix from the key lowerCAmelCase_ = '''.'''.join(key[1:] ) # rename the key for old, new in CONVERSION_MAPPING.items(): lowerCAmelCase_ = new_key.replace(_A , _A ) if "decoder" in new_key: for old, new in DECODER_CONVERSION_MAPPING.items(): lowerCAmelCase_ = new_key.replace(_A , _A ) if "layers" in new_key and "decoder" not in new_key: # use regex to replace the layer number lowerCAmelCase_ = re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , _A ) lowerCAmelCase_ = new_key.replace('''encoder''' , '''encoder.encoder''' ) elif "layers" in new_key and "decoder" in new_key: # use regex to replace the layer number lowerCAmelCase_ = re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , _A ) lowerCAmelCase_ = flax_dict[key] lowerCAmelCase_ = {} # convert converted_dict into torch format for key in converted_dict.keys(): if ("embed_tokens" not in key) and ("embedder" not in key): lowerCAmelCase_ = torch.from_numpy(converted_dict[key].T ) else: lowerCAmelCase_ = torch.from_numpy(converted_dict[key] ) return converted_torch_dict def __UpperCamelCase ( _A , _A , _A=False , _A=False ): lowerCAmelCase_ = get_flax_param(_A ) if not use_large: lowerCAmelCase_ = PixaStructVisionConfig() lowerCAmelCase_ = PixaStructTextConfig() else: lowerCAmelCase_ = PixaStructVisionConfig( hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 ) lowerCAmelCase_ = PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 ) lowerCAmelCase_ = PixaStructConfig( vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=_A ) lowerCAmelCase_ = PixaStructForConditionalGeneration(_A ) lowerCAmelCase_ = rename_and_convert_flax_params(_A ) model.load_state_dict(_A ) lowerCAmelCase_ = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' ) 
lowerCAmelCase_ = PixaStructImageProcessor() lowerCAmelCase_ = PixaStructProcessor(image_processor=_A , tokenizer=_A ) if use_large: lowerCAmelCase_ = 4096 lowerCAmelCase_ = True # mkdir if needed os.makedirs(_A , exist_ok=_A ) model.save_pretrained(_A ) processor.save_pretrained(_A ) print('''Model saved in {}'''.format(_A ) ) if __name__ == "__main__": _A = argparse.ArgumentParser() parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''') parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''') parser.add_argument('''--is_vqa''', action='''store_true''', help='''Use large model.''') _A = parser.parse_args() convert_pixastruct_original_pytorch_checkpoint_to_hf( args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large )
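# A hedged invocation sketch for the conversion script above (the script file
# name and checkpoint path are assumptions; the flags come from its argparse
# block):
#
#   python convert_pix2struct_original_to_hf.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --pytorch_dump_folder_path ./pix2struct-base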
import warnings

from .generation import TFGenerationMixin


class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import string


def atbash_slow(sequence: str) -> str:
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )


def benchmark() -> None:
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")


if __name__ == "__main__":
    for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
        print(f"{example} encrypted in atbash: {atbash(example)}")
    benchmark()
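# Round-trip example for the faster implementation above: atbash is its own
# inverse, so applying it twice restores the input.
#
#   assert atbash("Hello") == "Svool"
#   assert atbash(atbash("Hello")) == "Hello"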
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() _A = logging.get_logger(__name__) def __UpperCamelCase ( _A ): lowerCAmelCase_ = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: lowerCAmelCase_ = 192 lowerCAmelCase_ = 768 lowerCAmelCase_ = 12 lowerCAmelCase_ = 3 lowerCAmelCase_ = [800, 1333] lowerCAmelCase_ = False elif yolos_name == "yolos_s_dWr": lowerCAmelCase_ = 330 lowerCAmelCase_ = 14 lowerCAmelCase_ = 6 lowerCAmelCase_ = 1320 elif "yolos_s" in yolos_name: lowerCAmelCase_ = 384 lowerCAmelCase_ = 1536 lowerCAmelCase_ = 12 lowerCAmelCase_ = 6 elif "yolos_b" in yolos_name: lowerCAmelCase_ = [800, 1344] lowerCAmelCase_ = 91 lowerCAmelCase_ = '''huggingface/label-files''' lowerCAmelCase_ = '''coco-detection-id2label.json''' lowerCAmelCase_ = json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) ) lowerCAmelCase_ = {int(_A ): v for k, v in idalabel.items()} lowerCAmelCase_ = idalabel lowerCAmelCase_ = {v: k for k, v in idalabel.items()} return config def __UpperCamelCase ( _A , _A , _A = False ): for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCAmelCase_ = state_dict.pop(f"blocks.{i}.attn.qkv.weight" ) lowerCAmelCase_ = state_dict.pop(f"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict lowerCAmelCase_ = in_proj_weight[: config.hidden_size, :] lowerCAmelCase_ = in_proj_bias[: config.hidden_size] lowerCAmelCase_ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCAmelCase_ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCAmelCase_ = in_proj_weight[-config.hidden_size :, :] lowerCAmelCase_ = in_proj_bias[-config.hidden_size :] def __UpperCamelCase ( _A ): if "backbone" in name: lowerCAmelCase_ = name.replace('''backbone''' , '''vit''' ) if "cls_token" in name: lowerCAmelCase_ = name.replace('''cls_token''' , '''embeddings.cls_token''' ) if "det_token" in name: lowerCAmelCase_ = name.replace('''det_token''' , '''embeddings.detection_tokens''' ) if "mid_pos_embed" in name: lowerCAmelCase_ = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' ) if "pos_embed" in name: lowerCAmelCase_ = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' ) if "patch_embed.proj" in name: lowerCAmelCase_ = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "blocks" in name: lowerCAmelCase_ = name.replace('''blocks''' , '''encoder.layer''' ) if "attn.proj" in name: lowerCAmelCase_ = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: lowerCAmelCase_ = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: lowerCAmelCase_ = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: lowerCAmelCase_ = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: lowerCAmelCase_ = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: lowerCAmelCase_ = name.replace('''mlp.fc2''' , '''output.dense''' ) if "class_embed" in name: lowerCAmelCase_ = name.replace('''class_embed''' , '''class_labels_classifier''' ) if "bbox_embed" in name: lowerCAmelCase_ = name.replace('''bbox_embed''' , 
'''bbox_predictor''' ) if "vit.norm" in name: lowerCAmelCase_ = name.replace('''vit.norm''' , '''vit.layernorm''' ) return name def __UpperCamelCase ( _A , _A ): for key in orig_state_dict.copy().keys(): lowerCAmelCase_ = orig_state_dict.pop(_A ) if "qkv" in key: lowerCAmelCase_ = key.split('''.''' ) lowerCAmelCase_ = int(key_split[2] ) lowerCAmelCase_ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: lowerCAmelCase_ = val[:dim, :] lowerCAmelCase_ = val[ dim : dim * 2, : ] lowerCAmelCase_ = val[-dim:, :] else: lowerCAmelCase_ = val[:dim] lowerCAmelCase_ = val[dim : dim * 2] lowerCAmelCase_ = val[-dim:] else: lowerCAmelCase_ = val return orig_state_dict def __UpperCamelCase ( ): lowerCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCAmelCase_ = Image.open(requests.get(_A , stream=_A ).raw ) return im @torch.no_grad() def __UpperCamelCase ( _A , _A , _A , _A = False ): lowerCAmelCase_ = get_yolos_config(_A ) # load original state_dict lowerCAmelCase_ = torch.load(_A , map_location='''cpu''' )['''model'''] # load 🤗 model lowerCAmelCase_ = YolosForObjectDetection(_A ) model.eval() lowerCAmelCase_ = convert_state_dict(_A , _A ) model.load_state_dict(_A ) # Check outputs on an image, prepared by YolosImageProcessor lowerCAmelCase_ = 800 if yolos_name != '''yolos_ti''' else 512 lowerCAmelCase_ = YolosImageProcessor(format='''coco_detection''' , size=_A ) lowerCAmelCase_ = image_processor(images=prepare_img() , return_tensors='''pt''' ) lowerCAmelCase_ = model(**_A ) lowerCAmelCase_ , lowerCAmelCase_ = outputs.logits, outputs.pred_boxes lowerCAmelCase_ , lowerCAmelCase_ = None, None if yolos_name == "yolos_ti": lowerCAmelCase_ = torch.tensor( [[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] ) lowerCAmelCase_ = torch.tensor( [[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] ) elif yolos_name == "yolos_s_200_pre": lowerCAmelCase_ = torch.tensor( [[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] ) lowerCAmelCase_ = torch.tensor( [[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] ) elif yolos_name == "yolos_s_300_pre": lowerCAmelCase_ = torch.tensor( [[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] ) lowerCAmelCase_ = torch.tensor( [[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] ) elif yolos_name == "yolos_s_dWr": lowerCAmelCase_ = torch.tensor( [[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] ) lowerCAmelCase_ = torch.tensor( [[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] ) elif yolos_name == "yolos_base": lowerCAmelCase_ = torch.tensor( [[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] ) lowerCAmelCase_ = torch.tensor( [[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] ) else: raise ValueError(f"Unknown yolos_name: {yolos_name}" ) assert torch.allclose(logits[0, :3, :3] , _A , atol=1E-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , _A , 
atol=1E-4 ) Path(_A ).mkdir(exist_ok=_A ) print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_A ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(_A ) if push_to_hub: lowerCAmelCase_ = { '''yolos_ti''': '''yolos-tiny''', '''yolos_s_200_pre''': '''yolos-small''', '''yolos_s_300_pre''': '''yolos-small-300''', '''yolos_s_dWr''': '''yolos-small-dwr''', '''yolos_base''': '''yolos-base''', } print('''Pushing to the hub...''' ) lowerCAmelCase_ = model_mapping[yolos_name] image_processor.push_to_hub(_A , organization='''hustvl''' ) model.push_to_hub(_A , organization='''hustvl''' ) if __name__ == "__main__": _A = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--yolos_name''', default='''yolos_s_200_pre''', type=str, help=( '''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\',''' ''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.''' ), ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) _A = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
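# A hedged invocation sketch for the conversion script above (the script file
# name and checkpoint path are assumptions; the flags come from its argparse
# block):
#
#   python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
#       --checkpoint_path /path/to/yolos.pth \
#       --pytorch_dump_folder_path ./yolos-small --push_to_hub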
def get_highest_set_bit_position(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
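# Example for the helper above: 8 == 0b1000 needs four right shifts to reach
# zero, so its highest set bit sits at (1-indexed) position 4; for 0 the loop
# never runs and the result is 0.
#
#   assert get_highest_set_bit_position(8) == 4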
def max_product_subarray(numbers: list[int]) -> int:
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
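# Worked examples for the routine above: for [2, 3, -2, 4] the best subarray
# is [2, 3] with product 6; tracking the running minimum as well lets a later
# negative number flip a large negative product back to a large positive one,
# e.g. [-2, 3, -4] yields (-2 * 3) * -4 == 24.
#
#   assert max_product_subarray([2, 3, -2, 4]) == 6
#   assert max_product_subarray([-2, 3, -4]) == 24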
from statistics import mean

import numpy as np


def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )

    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )

    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def __UpperCamelCase ( _A ): lowerCAmelCase_ = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2] lowerCAmelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False lowerCAmelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False lowerCAmelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: lowerCAmelCase_ = [3, 3, 3, 3] lowerCAmelCase_ = [5, 5, 5, 5] elif "fl4" in model_name: lowerCAmelCase_ = [4, 4, 4, 4] lowerCAmelCase_ = [3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: lowerCAmelCase_ = [3, 3, 3, 3] if "lrf" in model_name: lowerCAmelCase_ = [3, 3, 3, 3] else: lowerCAmelCase_ = [2, 2, 2, 2] if "tiny" in model_name: lowerCAmelCase_ = 96 elif "small" in model_name: lowerCAmelCase_ = 96 elif "base" in model_name: lowerCAmelCase_ = 128 elif "large" in model_name: lowerCAmelCase_ = 192 elif "xlarge" in model_name: lowerCAmelCase_ = 256 elif "huge" in model_name: lowerCAmelCase_ = 352 # set label information lowerCAmelCase_ = '''huggingface/label-files''' if "large" in model_name or "huge" in model_name: lowerCAmelCase_ = '''imagenet-22k-id2label.json''' else: lowerCAmelCase_ = '''imagenet-1k-id2label.json''' lowerCAmelCase_ = json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) ) lowerCAmelCase_ = {int(_A ): v for k, v in idalabel.items()} lowerCAmelCase_ = {v: k for k, v in idalabel.items()} lowerCAmelCase_ = FocalNetConfig( embed_dim=_A , depths=_A , focal_levels=_A , focal_windows=_A , use_conv_embed=_A , idalabel=_A , labelaid=_A , use_post_layernorm=_A , use_layerscale=_A , ) return config def __UpperCamelCase ( _A ): if "patch_embed.proj" in name: lowerCAmelCase_ = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: lowerCAmelCase_ = name.replace('''patch_embed.norm''' , '''embeddings.norm''' ) if "layers" in name: lowerCAmelCase_ = '''encoder.''' + name if "encoder.layers" in name: lowerCAmelCase_ = name.replace('''encoder.layers''' , '''encoder.stages''' ) if "downsample.proj" in name: lowerCAmelCase_ = name.replace('''downsample.proj''' , '''downsample.projection''' ) if "blocks" in name: lowerCAmelCase_ = name.replace('''blocks''' , '''layers''' ) if "modulation.f.weight" in name or "modulation.f.bias" in name: lowerCAmelCase_ = name.replace('''modulation.f''' , '''modulation.projection_in''' ) if "modulation.h.weight" in name or "modulation.h.bias" in name: lowerCAmelCase_ = name.replace('''modulation.h''' , '''modulation.projection_context''' ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: lowerCAmelCase_ = name.replace('''modulation.proj''' , '''modulation.projection_out''' ) if name == "norm.weight": lowerCAmelCase_ = '''layernorm.weight''' if name == "norm.bias": lowerCAmelCase_ = '''layernorm.bias''' if "head" in name: lowerCAmelCase_ = name.replace('''head''' , '''classifier''' ) else: lowerCAmelCase_ = '''focalnet.''' + name return name def __UpperCamelCase ( _A , _A , _A=False ): # fmt: off 
lowerCAmelCase_ = { '''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''', '''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''', '''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''', '''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''', '''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''', '''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''', '''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''', '''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''', '''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''', '''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''', } # fmt: on lowerCAmelCase_ = model_name_to_url[model_name] print('''Checkpoint URL: ''' , _A ) lowerCAmelCase_ = torch.hub.load_state_dict_from_url(_A , map_location='''cpu''' )['''model'''] # rename keys for key in state_dict.copy().keys(): lowerCAmelCase_ = state_dict.pop(_A ) lowerCAmelCase_ = val lowerCAmelCase_ = get_focalnet_config(_A ) lowerCAmelCase_ = FocalNetForImageClassification(_A ) model.eval() # load state dict model.load_state_dict(_A ) # verify conversion lowerCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCAmelCase_ = BitImageProcessor( do_resize=_A , size={'''shortest_edge''': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=_A , crop_size=224 , do_normalize=_A , image_mean=_A , image_std=_A , ) lowerCAmelCase_ = Image.open(requests.get(_A , stream=_A ).raw ) lowerCAmelCase_ = processor(images=_A , return_tensors='''pt''' ) lowerCAmelCase_ = transforms.Compose( [ transforms.Resize(256 ), transforms.CenterCrop(224 ), transforms.ToTensor(), transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ), ] ) lowerCAmelCase_ = image_transforms(_A ).unsqueeze(0 ) # verify pixel_values assert torch.allclose(inputs.pixel_values , _A , atol=1E-4 ) lowerCAmelCase_ = model(**_A ) lowerCAmelCase_ = outputs.logits.argmax(-1 ).item() print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] ) print('''First values of logits:''' , outputs.logits[0, :3] ) if model_name == "focalnet-tiny": lowerCAmelCase_ = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ) elif model_name == "focalnet-tiny-lrf": lowerCAmelCase_ = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] ) elif model_name == "focalnet-small": lowerCAmelCase_ = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] ) elif model_name == "focalnet-small-lrf": lowerCAmelCase_ = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] ) elif model_name == "focalnet-base": lowerCAmelCase_ = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] ) elif model_name == "focalnet-base-lrf": lowerCAmelCase_ = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] ) assert torch.allclose(outputs.logits[0, :3] , _A , atol=1E-4 ) print('''Looks ok!''' ) if 
pytorch_dump_folder_path is not None: print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_A ) processor.save_pretrained(_A ) if push_to_hub: print(f"Pushing model and processor of {model_name} to the hub..." ) model.push_to_hub(f"{model_name}" ) processor.push_to_hub(f"{model_name}" ) if __name__ == "__main__": _A = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''focalnet-tiny''', type=str, help='''Name of the FocalNet model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub.''', ) _A = parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
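# A hedged invocation sketch for the conversion script above (the script file
# name is an assumption; the flags come from its argparse block):
#
#   python convert_focalnet_to_hf_format.py --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny --push_to_hub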
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    if i < 0:
        raise ValueError("Input must be non-negative")
    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
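# Sanity check for the implementation above, using the standard MD5 test
# vector for the empty string (md5_me is the name given to the unnamed
# top-level routine above):
#
#   assert md5_me(b"") == b"d41d8cd98f00b204e9800998ecf8427e"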
import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
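# A hedged invocation sketch for the conversion script above (the checkpoint
# path and script file name are assumptions; the two positional arguments come
# from its argparse block):
#
#   python convert_xglm_original_ckpt_to_trfms.py /path/to/model.pt ./xglm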
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _A = logging.get_logger(__name__) _A = torch.device('''cpu''') def __UpperCamelCase ( ): lowerCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCAmelCase_ = Image.open(requests.get(_A , stream=_A ).raw ) return im def __UpperCamelCase ( _A ): if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1_7_0_3E0_0, 2.1_1_0_7E0_0, -2.0_8_1_1E0_0, 8.8_6_8_5E-0_1, 2.4_3_6_0E-0_1] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9_6_3_6E-0_1, 2.3_4_7_8E-0_1, -1.6_9_6_3E0_0, -1.7_3_8_1E0_0, -8.6_3_3_7E-0_1] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2_7_6_8E-0_1, -4.7_4_2_9E-0_1, -1.0_8_9_7E0_0, -1.0_2_4_8E0_0, 3.5_5_2_3E-0_2] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5_3_3_0E-0_1, 2.4_2_1_1E-0_1, -6.0_1_8_5E-0_1, -8.2_7_8_9E-0_1, -6.0_4_4_6E-0_2] ) def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = dct.pop(_A ) lowerCAmelCase_ = val def __UpperCamelCase ( _A ): lowerCAmelCase_ = [] for k in state_dict.keys(): lowerCAmelCase_ = k if ".pwconv" in k: lowerCAmelCase_ = k_new.replace('''.pwconv''' , '''.point_wise_conv''' ) if ".dwconv" in k: lowerCAmelCase_ = k_new.replace('''.dwconv''' , '''.depth_wise_conv''' ) if ".Proj." in k: lowerCAmelCase_ = k_new.replace('''.Proj.''' , '''.proj.''' ) if "patch_embed" in k_new: lowerCAmelCase_ = k_new.replace('''patch_embed''' , '''swiftformer.patch_embed.patch_embedding''' ) if "network" in k_new: lowerCAmelCase_ = k_new.split('''.''' ) if ls[2].isdigit(): lowerCAmelCase_ = '''swiftformer.encoder.network.''' + ls[1] + '''.blocks.''' + ls[2] + '''.''' + '''.'''.join(ls[3:] ) else: lowerCAmelCase_ = k_new.replace('''network''' , '''swiftformer.encoder.network''' ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size lowerCAmelCase_ = 1000 lowerCAmelCase_ = '''huggingface/label-files''' lowerCAmelCase_ = '''imagenet-1k-id2label.json''' lowerCAmelCase_ = json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) ) lowerCAmelCase_ = {int(_A ): v for k, v in idalabel.items()} lowerCAmelCase_ = idalabel lowerCAmelCase_ = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": lowerCAmelCase_ = [3, 3, 6, 4] lowerCAmelCase_ = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": lowerCAmelCase_ = [3, 3, 9, 6] lowerCAmelCase_ = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": lowerCAmelCase_ = [4, 3, 10, 5] lowerCAmelCase_ = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": lowerCAmelCase_ = [4, 4, 12, 6] lowerCAmelCase_ = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith('''https''' ): lowerCAmelCase_ = torch.hub.load_state_dict_from_url(_A , map_location='''cpu''' , check_hash=_A ) else: lowerCAmelCase_ = torch.load(_A , map_location='''cpu''' ) lowerCAmelCase_ = checkpoint lowerCAmelCase_ = create_rename_keys(_A ) for rename_key_src, rename_key_dest in rename_keys: rename_key(_A , _A 
, _A ) # load HuggingFace model lowerCAmelCase_ = SwiftFormerForImageClassification(_A ).eval() hf_model.load_state_dict(_A ) # prepare test inputs lowerCAmelCase_ = prepare_img() lowerCAmelCase_ = ViTImageProcessor.from_pretrained('''preprocessor_config''' ) lowerCAmelCase_ = processor(images=_A , return_tensors='''pt''' ) # compare outputs from both models lowerCAmelCase_ = get_expected_output(_A ) lowerCAmelCase_ = hf_model(inputs['''pixel_values'''] ).logits assert hf_logits.shape == torch.Size([1, 1000] ) assert torch.allclose(hf_logits[0, 0:5] , _A , atol=1E-3 ) Path(_A ).mkdir(exist_ok=_A ) print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}" ) hf_model.save_pretrained(_A ) if __name__ == "__main__": _A = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--swiftformer_name''', default='''swiftformer_xs''', choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''], type=str, help='''Name of the SwiftFormer model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''./converted_outputs/''', type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''') _A = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
# This script creates a super tiny model that is useful inside tests, when we just want to test that # the machinery works, without needing to the check the quality of the outcomes. # # This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny - # all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and # emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files. # The latter is done by `fsmt-make-super-tiny-model.py`. # # It will be used then as "stas/tiny-wmt19-en-ru" from pathlib import Path import json import tempfile from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES _A = '''tiny-wmt19-en-ru''' # Build # borrowed from a test _A = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''w</w>''', '''r</w>''', '''t</w>''', '''lo''', '''low''', '''er</w>''', '''low</w>''', '''lowest</w>''', '''newer</w>''', '''wider</w>''', '''<unk>''', ] _A = dict(zip(vocab, range(len(vocab)))) _A = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', ''''''] with tempfile.TemporaryDirectory() as tmpdirname: _A = Path(tmpdirname) _A = build_dir / VOCAB_FILES_NAMES['''src_vocab_file'''] _A = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file'''] _A = build_dir / VOCAB_FILES_NAMES['''merges_file'''] with open(src_vocab_file, '''w''') as fp: fp.write(json.dumps(vocab_tokens)) with open(tgt_vocab_file, '''w''') as fp: fp.write(json.dumps(vocab_tokens)) with open(merges_file, '''w''') as fp: fp.write('''\n'''.join(merges)) _A = FSMTTokenizer( langs=['''en''', '''ru'''], src_vocab_size=len(vocab), tgt_vocab_size=len(vocab), src_vocab_file=src_vocab_file, tgt_vocab_file=tgt_vocab_file, merges_file=merges_file, ) _A = FSMTConfig( langs=['''ru''', '''en'''], src_vocab_size=1_000, tgt_vocab_size=1_000, d_model=4, encoder_layers=1, decoder_layers=1, encoder_ffn_dim=4, decoder_ffn_dim=4, encoder_attention_heads=1, decoder_attention_heads=1, ) _A = FSMTForConditionalGeneration(config) print(f"num of params {tiny_model.num_parameters()}") # Test _A = tokenizer(['''Making tiny model'''], return_tensors='''pt''') _A = tiny_model(**batch) print('''test output:''', len(outputs.logits[0])) # Save tiny_model.half() # makes it smaller tiny_model.save_pretrained(mname_tiny) tokenizer.save_pretrained(mname_tiny) print(f"Generated {mname_tiny}") # Upload # transformers-cli upload tiny-wmt19-en-ru
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Return a string with the characters of ``first_str`` and ``second_str`` interleaved."""
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = first_str_length if first_str_length > second_str_length else second_str_length
    output_list: list[str] = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)


if __name__ == "__main__":
    print(alternative_string_arrange("AB", "XYZ"), end=" ")
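# Worked examples: characters are interleaved and the longer string's tail is
# appended.
#
#     >>> alternative_string_arrange("ABCD", "XY")
#     'AXBYCD'
#     >>> alternative_string_arrange("AB", "XYZ")
#     'AXBYZ'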
import argparse
from collections import defaultdict

import yaml

PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """Remove duplicate entries from the model doc toc and sort entries alphabetically by title."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    # Extract the modalities and clean them one by one.
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                modality_doc["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
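# Illustrative behaviour of clean_model_doc_toc on hypothetical data: duplicate
# `local` keys with identical titles collapse to a single entry, and the result
# is sorted by title.
#
#     >>> clean_model_doc_toc(
#     ...     [
#     ...         {"local": "model_doc/bert", "title": "BERT"},
#     ...         {"local": "model_doc/albert", "title": "ALBERT"},
#     ...         {"local": "model_doc/bert", "title": "BERT"},
#     ...     ]
#     ... )
#     [{'local': 'model_doc/albert', 'title': 'ALBERT'}, {'local': 'model_doc/bert', 'title': 'BERT'}]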
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Greedy fractional knapsack: take items by best profit/weight ratio."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be the same.")
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit cannot be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight cannot be negative.")

    # List created to store profit gained for 1 kg of each weight respectively.
    # Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop until the total weight reaches the max limit (e.g. 15 kg) or i reaches length
    while limit <= max_weight and i < length:
        # flag value for the greatest element encountered in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Add the full profit for this item:
            # 1 == weight[index] / weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than the remaining limit, take
            # only the required fraction of the item and calculate the profit for it:
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain


if __name__ == "__main__":
    print(
        "Input profits, weights, and then max_weight (all positive ints) separated by "
        "spaces."
    )

    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))

    # Function Call
    calc_profit(profit, weight, max_weight)
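# Worked example (illustrative values): items are taken greedily by
# profit/weight ratio, and the final item may be fractional.
#
#     >>> round(calc_profit([10, 9, 12], [3, 4, 5], 7), 2)  # all of item 0, then 4/5 of item 2
#     19.6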
import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class A ( __UpperCAmelCase ): __snake_case = (UnCLIPScheduler,) def SCREAMING_SNAKE_CASE__ ( self, **UpperCamelCase__ ): """simple docstring""" lowerCAmelCase_ = { '''num_train_timesteps''': 1000, '''variance_type''': '''fixed_small_log''', '''clip_sample''': True, '''clip_sample_range''': 1.0, '''prediction_type''': '''epsilon''', } config.update(**UpperCamelCase__ ) return config def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" for time_step in [0, 500, 999]: for prev_timestep in [None, 5, 100, 250, 500, 750]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=UpperCamelCase__, prev_timestep=UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.scheduler_classes[0] lowerCAmelCase_ = self.get_scheduler_config(variance_type='''fixed_small_log''' ) lowerCAmelCase_ = scheduler_class(**UpperCamelCase__ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1E-5 def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.scheduler_classes[0] lowerCAmelCase_ = self.get_scheduler_config(variance_type='''learned_range''' ) lowerCAmelCase_ = scheduler_class(**UpperCamelCase__ ) lowerCAmelCase_ = 0.5 assert scheduler._get_variance(1, predicted_variance=UpperCamelCase__ ) - -10.1_712_790 < 1E-5 assert scheduler._get_variance(487, predicted_variance=UpperCamelCase__ ) - -5.7_998_052 < 1E-5 assert scheduler._get_variance(999, predicted_variance=UpperCamelCase__ ) - -0.0_010_011 < 1E-5 def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.scheduler_classes[0] lowerCAmelCase_ = self.get_scheduler_config() lowerCAmelCase_ = scheduler_class(**UpperCamelCase__ ) lowerCAmelCase_ = scheduler.timesteps lowerCAmelCase_ = self.dummy_model() lowerCAmelCase_ = self.dummy_sample_deter lowerCAmelCase_ = torch.manual_seed(0 ) for i, t in enumerate(UpperCamelCase__ ): # 1. predict noise residual lowerCAmelCase_ = model(UpperCamelCase__, UpperCamelCase__ ) # 2. 
predict previous mean of sample x_t-1 lowerCAmelCase_ = scheduler.step(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, generator=UpperCamelCase__ ).prev_sample lowerCAmelCase_ = pred_prev_sample lowerCAmelCase_ = torch.sum(torch.abs(UpperCamelCase__ ) ) lowerCAmelCase_ = torch.mean(torch.abs(UpperCamelCase__ ) ) assert abs(result_sum.item() - 252.2_682_495 ) < 1E-2 assert abs(result_mean.item() - 0.3_284_743 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.scheduler_classes[0] lowerCAmelCase_ = self.get_scheduler_config() lowerCAmelCase_ = scheduler_class(**UpperCamelCase__ ) scheduler.set_timesteps(25 ) lowerCAmelCase_ = scheduler.timesteps lowerCAmelCase_ = self.dummy_model() lowerCAmelCase_ = self.dummy_sample_deter lowerCAmelCase_ = torch.manual_seed(0 ) for i, t in enumerate(UpperCamelCase__ ): # 1. predict noise residual lowerCAmelCase_ = model(UpperCamelCase__, UpperCamelCase__ ) if i + 1 == timesteps.shape[0]: lowerCAmelCase_ = None else: lowerCAmelCase_ = timesteps[i + 1] # 2. predict previous mean of sample x_t-1 lowerCAmelCase_ = scheduler.step( UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, prev_timestep=UpperCamelCase__, generator=UpperCamelCase__ ).prev_sample lowerCAmelCase_ = pred_prev_sample lowerCAmelCase_ = torch.sum(torch.abs(UpperCamelCase__ ) ) lowerCAmelCase_ = torch.mean(torch.abs(UpperCamelCase__ ) ) assert abs(result_sum.item() - 258.2_044_983 ) < 1E-2 assert abs(result_mean.item() - 0.3_362_038 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" pass
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _A = logging.get_logger(__name__) _A = { '''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''', } class A ( __UpperCAmelCase , __UpperCAmelCase ): __snake_case = 'focalnet' def __init__( self, UpperCamelCase__=224, UpperCamelCase__=4, UpperCamelCase__=3, UpperCamelCase__=96, UpperCamelCase__=False, UpperCamelCase__=[192, 384, 768, 768], UpperCamelCase__=[2, 2, 6, 2], UpperCamelCase__=[2, 2, 2, 2], UpperCamelCase__=[3, 3, 3, 3], UpperCamelCase__="gelu", UpperCamelCase__=4.0, UpperCamelCase__=0.0, UpperCamelCase__=0.1, UpperCamelCase__=False, UpperCamelCase__=1E-4, UpperCamelCase__=False, UpperCamelCase__=False, UpperCamelCase__=False, UpperCamelCase__=0.02, UpperCamelCase__=1E-5, UpperCamelCase__=32, UpperCamelCase__=None, UpperCamelCase__=None, **UpperCamelCase__, ): """simple docstring""" super().__init__(**UpperCamelCase__ ) lowerCAmelCase_ = image_size lowerCAmelCase_ = patch_size lowerCAmelCase_ = num_channels lowerCAmelCase_ = embed_dim lowerCAmelCase_ = use_conv_embed lowerCAmelCase_ = hidden_sizes lowerCAmelCase_ = depths lowerCAmelCase_ = focal_levels lowerCAmelCase_ = focal_windows lowerCAmelCase_ = hidden_act lowerCAmelCase_ = mlp_ratio lowerCAmelCase_ = hidden_dropout_prob lowerCAmelCase_ = drop_path_rate lowerCAmelCase_ = use_layerscale lowerCAmelCase_ = layerscale_value lowerCAmelCase_ = use_post_layernorm lowerCAmelCase_ = use_post_layernorm_in_modulation lowerCAmelCase_ = normalize_modulator lowerCAmelCase_ = initializer_range lowerCAmelCase_ = layer_norm_eps lowerCAmelCase_ = encoder_stride lowerCAmelCase_ = ['''stem'''] + [f"stage{idx}" for idx in range(1, len(self.depths ) + 1 )] lowerCAmelCase_ , lowerCAmelCase_ = get_aligned_output_features_output_indices( out_features=UpperCamelCase__, out_indices=UpperCamelCase__, stage_names=self.stage_names )
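# Minimal instantiation sketch. The class above is named `A` in this flattened
# source; given model_type "focalnet" it corresponds to transformers'
# FocalNetConfig, which is assumed below.
#
#     >>> from transformers import FocalNetConfig
#     >>> config = FocalNetConfig(embed_dim=96, depths=[2, 2, 6, 2])
#     >>> config.stage_names
#     ['stem', 'stage1', 'stage2', 'stage3', 'stage4']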
import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class A : @staticmethod def SCREAMING_SNAKE_CASE__ ( *UpperCamelCase__, **UpperCamelCase__ ): """simple docstring""" pass def __UpperCamelCase ( _A ): lowerCAmelCase_ = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class A ( unittest.TestCase ): __snake_case = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ): """simple docstring""" lowerCAmelCase_ = DepthEstimationPipeline(model=UpperCamelCase__, image_processor=UpperCamelCase__ ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ ): """simple docstring""" lowerCAmelCase_ = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, UpperCamelCase__ ) import datasets lowerCAmelCase_ = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''', '''image''', split='''test''' ) lowerCAmelCase_ = depth_estimator( [ Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ), '''http://images.cocodataset.org/val2017/000000039769.jpg''', # RGBA dataset[0]['''file'''], # LA dataset[1]['''file'''], # L dataset[2]['''file'''], ] ) self.assertEqual( [ {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, ], UpperCamelCase__, ) @require_tf @unittest.skip('''Depth estimation is not implemented in TF''' ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" pass @slow @require_torch def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = '''Intel/dpt-large''' lowerCAmelCase_ = pipeline('''depth-estimation''', model=UpperCamelCase__ ) lowerCAmelCase_ = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' ) lowerCAmelCase_ = hashimage(outputs['''depth'''] ) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ), 29.304 ) self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ), 2.662 ) @require_torch def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _A = logging.get_logger(__name__) _A = { '''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''', # See all ViT models at https://huggingface.co/models?filter=vit } class A ( __UpperCAmelCase ): __snake_case = 'vit' def __init__( self, UpperCamelCase__=768, UpperCamelCase__=12, UpperCamelCase__=12, UpperCamelCase__=3072, UpperCamelCase__="gelu", UpperCamelCase__=0.0, UpperCamelCase__=0.0, UpperCamelCase__=0.02, UpperCamelCase__=1E-12, UpperCamelCase__=224, UpperCamelCase__=16, UpperCamelCase__=3, UpperCamelCase__=True, UpperCamelCase__=16, **UpperCamelCase__, ): """simple docstring""" super().__init__(**UpperCamelCase__ ) lowerCAmelCase_ = hidden_size lowerCAmelCase_ = num_hidden_layers lowerCAmelCase_ = num_attention_heads lowerCAmelCase_ = intermediate_size lowerCAmelCase_ = hidden_act lowerCAmelCase_ = hidden_dropout_prob lowerCAmelCase_ = attention_probs_dropout_prob lowerCAmelCase_ = initializer_range lowerCAmelCase_ = layer_norm_eps lowerCAmelCase_ = image_size lowerCAmelCase_ = patch_size lowerCAmelCase_ = num_channels lowerCAmelCase_ = qkv_bias lowerCAmelCase_ = encoder_stride class A ( __UpperCAmelCase ): __snake_case = version.parse('1.11' ) @property def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" return 1E-4
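# Sketch of the ONNX export hook above. Both classes are named `A` in this
# flattened source; upstream they are ViTConfig and ViTOnnxConfig, which is
# assumed below.
#
#     >>> from transformers import ViTConfig
#     >>> from transformers.models.vit.configuration_vit import ViTOnnxConfig
#     >>> onnx_config = ViTOnnxConfig(ViTConfig())
#     >>> onnx_config.inputs
#     OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})])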
import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings _A = R''' [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: title_sep (`str`, *optional*, defaults to `" / "`): Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`]. doc_sep (`str`, *optional*, defaults to `" // "`): Separator inserted between the text of the retrieved document and the original input when calling [`RagRetriever`]. n_docs (`int`, *optional*, defaults to 5): Number of documents to retrieve. max_combined_length (`int`, *optional*, defaults to 300): Max length of contextualized input returned by [`~RagRetriever.__call__`]. retrieval_vector_size (`int`, *optional*, defaults to 768): Dimensionality of the document embeddings indexed by [`RagRetriever`]. retrieval_batch_size (`int`, *optional*, defaults to 8): Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated [`RagRetriever`]. dataset (`str`, *optional*, defaults to `"wiki_dpr"`): A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids using `datasets.list_datasets()`). dataset_split (`str`, *optional*, defaults to `"train"`) Which split of the `dataset` to load. index_name (`str`, *optional*, defaults to `"compressed"`) The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and `"compressed"`. index_path (`str`, *optional*) The path to the serialized faiss index on disk. passages_path (`str`, *optional*): A path to text passages compatible with the faiss index. Required if using [`~models.rag.retrieval_rag.LegacyIndex`] use_dummy_dataset (`bool`, *optional*, defaults to `False`) Whether to load a "dummy" variant of the dataset specified by `dataset`. label_smoothing (`float`, *optional*, defaults to 0.0): Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing in the loss calculation. If set to 0, no label smoothing is performed. do_marginalize (`bool`, *optional*, defaults to `False`): If `True`, the logits are marginalized over all documents by making use of `torch.nn.functional.log_softmax`. reduce_loss (`bool`, *optional*, defaults to `False`): Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation. do_deduplication (`bool`, *optional*, defaults to `True`): Whether or not to deduplicate the generations from different context documents for a given input. Has to be set to `False` if used while training with distributed backend. exclude_bos_score (`bool`, *optional*, defaults to `False`): Whether or not to disregard the BOS token when computing the loss. output_retrieved(`bool`, *optional*, defaults to `False`): If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and `context_attention_mask` are returned. See returned tensors for more detail. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). forced_eos_token_id (`int`, *optional*): The id of the token to force as the last generated token when `max_length` is reached. Usually set to `eos_token_id`. 
''' @add_start_docstrings(__UpperCAmelCase ) class A ( __UpperCAmelCase ): __snake_case = 'rag' __snake_case = True def __init__( self, UpperCamelCase__=None, UpperCamelCase__=True, UpperCamelCase__=None, UpperCamelCase__=None, UpperCamelCase__=None, UpperCamelCase__=None, UpperCamelCase__=None, UpperCamelCase__=" / ", UpperCamelCase__=" // ", UpperCamelCase__=5, UpperCamelCase__=300, UpperCamelCase__=768, UpperCamelCase__=8, UpperCamelCase__="wiki_dpr", UpperCamelCase__="train", UpperCamelCase__="compressed", UpperCamelCase__=None, UpperCamelCase__=None, UpperCamelCase__=False, UpperCamelCase__=False, UpperCamelCase__=0.0, UpperCamelCase__=True, UpperCamelCase__=False, UpperCamelCase__=False, UpperCamelCase__=False, UpperCamelCase__=True, UpperCamelCase__=None, **UpperCamelCase__, ): """simple docstring""" super().__init__( bos_token_id=UpperCamelCase__, pad_token_id=UpperCamelCase__, eos_token_id=UpperCamelCase__, decoder_start_token_id=UpperCamelCase__, forced_eos_token_id=UpperCamelCase__, is_encoder_decoder=UpperCamelCase__, prefix=UpperCamelCase__, vocab_size=UpperCamelCase__, **UpperCamelCase__, ) assert ( "question_encoder" in kwargs and "generator" in kwargs ), "Config has to be initialized with question_encoder and generator config" lowerCAmelCase_ = kwargs.pop('''question_encoder''' ) lowerCAmelCase_ = question_encoder_config.pop('''model_type''' ) lowerCAmelCase_ = kwargs.pop('''generator''' ) lowerCAmelCase_ = decoder_config.pop('''model_type''' ) from ..auto.configuration_auto import AutoConfig lowerCAmelCase_ = AutoConfig.for_model(UpperCamelCase__, **UpperCamelCase__ ) lowerCAmelCase_ = AutoConfig.for_model(UpperCamelCase__, **UpperCamelCase__ ) lowerCAmelCase_ = reduce_loss lowerCAmelCase_ = label_smoothing lowerCAmelCase_ = exclude_bos_score lowerCAmelCase_ = do_marginalize lowerCAmelCase_ = title_sep lowerCAmelCase_ = doc_sep lowerCAmelCase_ = n_docs lowerCAmelCase_ = max_combined_length lowerCAmelCase_ = dataset lowerCAmelCase_ = dataset_split lowerCAmelCase_ = index_name lowerCAmelCase_ = retrieval_vector_size lowerCAmelCase_ = retrieval_batch_size lowerCAmelCase_ = passages_path lowerCAmelCase_ = index_path lowerCAmelCase_ = use_dummy_dataset lowerCAmelCase_ = output_retrieved lowerCAmelCase_ = do_deduplication lowerCAmelCase_ = use_cache if self.forced_eos_token_id is None: lowerCAmelCase_ = getattr(self.generator, '''forced_eos_token_id''', UpperCamelCase__ ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls, UpperCamelCase__, UpperCamelCase__, **UpperCamelCase__ ): """simple docstring""" return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = copy.deepcopy(self.__dict__ ) lowerCAmelCase_ = self.question_encoder.to_dict() lowerCAmelCase_ = self.generator.to_dict() lowerCAmelCase_ = self.__class__.model_type return output
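# Hedged construction sketch. The composing classmethod above is upstream
# `RagConfig.from_question_encoder_generator_configs` (names are obfuscated in
# this flattened source), so the call below assumes that binding.
#
#     >>> from transformers import BartConfig, DPRConfig, RagConfig
#     >>> config = RagConfig.from_question_encoder_generator_configs(DPRConfig(), BartConfig())
#     >>> config.model_type
#     'rag'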
import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def __UpperCamelCase ( _A , _A ): assert isinstance(_A , _A ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = tmp_path / '''cache''' lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCAmelCase_ = JsonDatasetReader(_A , cache_dir=_A , keep_in_memory=_A ).read() _check_json_dataset(_A , _A ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = tmp_path / '''cache''' lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase_ = features.copy() if features else default_expected_features lowerCAmelCase_ = ( Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase_ = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read() _check_json_dataset(_A , _A ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}, ] , ) def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = tmp_path / '''cache''' lowerCAmelCase_ = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''} lowerCAmelCase_ = features.copy() if features else default_expected_features lowerCAmelCase_ = ( Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase_ = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read() assert isinstance(_A , _A ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def __UpperCamelCase ( _A , _A ): # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"} lowerCAmelCase_ = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''} lowerCAmelCase_ = features.copy() lowerCAmelCase_ = ( Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase_ = tmp_path / '''cache''' lowerCAmelCase_ = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read() assert isinstance(_A , _A ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype 
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = tmp_path / '''cache''' lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase_ = JsonDatasetReader(_A , cache_dir=_A , split=_A ).read() _check_json_dataset(_A , _A ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''' , [str, list] ) def __UpperCamelCase ( _A , _A , _A ): if issubclass(_A , _A ): lowerCAmelCase_ = jsonl_path elif issubclass(_A , _A ): lowerCAmelCase_ = [jsonl_path] lowerCAmelCase_ = tmp_path / '''cache''' lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase_ = JsonDatasetReader(_A , cache_dir=_A ).read() _check_json_dataset(_A , _A ) def __UpperCamelCase ( _A , _A , _A=("train",) ): assert isinstance(_A , _A ) for split in splits: lowerCAmelCase_ = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = tmp_path / '''cache''' lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCAmelCase_ = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=_A , keep_in_memory=_A ).read() _check_json_datasetdict(_A , _A ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = tmp_path / '''cache''' lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase_ = features.copy() if features else default_expected_features lowerCAmelCase_ = ( Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase_ = JsonDatasetReader({'''train''': jsonl_path} , features=_A , cache_dir=_A ).read() _check_json_datasetdict(_A , _A ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def __UpperCamelCase ( _A , _A , _A ): if split: lowerCAmelCase_ = {split: jsonl_path} else: lowerCAmelCase_ = '''train''' lowerCAmelCase_ = {'''train''': jsonl_path, '''test''': jsonl_path} lowerCAmelCase_ = tmp_path / '''cache''' lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase_ = JsonDatasetReader(_A , cache_dir=_A ).read() _check_json_datasetdict(_A , _A , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def __UpperCamelCase ( _A ): return json.load(_A ) def __UpperCamelCase ( _A ): return [json.loads(_A ) for line in buffer] class A : @pytest.mark.parametrize('''lines, load_json_function''', [(True, load_json_lines), (False, load_json)] ) def SCREAMING_SNAKE_CASE__ ( self, 
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ): """simple docstring""" with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, lines=UpperCamelCase__ ).write() buffer.seek(0 ) lowerCAmelCase_ = load_json_function(UpperCamelCase__ ) assert isinstance(UpperCamelCase__, UpperCamelCase__ ) assert isinstance(exported_content[0], UpperCamelCase__ ) assert len(UpperCamelCase__ ) == 10 @pytest.mark.parametrize( '''orient, container, keys, len_at''', [ ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None), ('''split''', dict, {'''columns''', '''data'''}, '''data'''), ('''index''', dict, set('''0123456789''' ), None), ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''), ('''values''', list, None, None), ('''table''', dict, {'''schema''', '''data'''}, '''data'''), ], ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ): """simple docstring""" with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, lines=UpperCamelCase__, orient=UpperCamelCase__ ).write() buffer.seek(0 ) lowerCAmelCase_ = load_json(UpperCamelCase__ ) assert isinstance(UpperCamelCase__, UpperCamelCase__ ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(UpperCamelCase__, '''keys''' ) and not hasattr(exported_content[0], '''keys''' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(UpperCamelCase__ ) == 10 @pytest.mark.parametrize('''lines, load_json_function''', [(True, load_json_lines), (False, load_json)] ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ): """simple docstring""" with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, lines=UpperCamelCase__, num_proc=2 ).write() buffer.seek(0 ) lowerCAmelCase_ = load_json_function(UpperCamelCase__ ) assert isinstance(UpperCamelCase__, UpperCamelCase__ ) assert isinstance(exported_content[0], UpperCamelCase__ ) assert len(UpperCamelCase__ ) == 10 @pytest.mark.parametrize( '''orient, container, keys, len_at''', [ ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None), ('''split''', dict, {'''columns''', '''data'''}, '''data'''), ('''index''', dict, set('''0123456789''' ), None), ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''), ('''values''', list, None, None), ('''table''', dict, {'''schema''', '''data'''}, '''data'''), ], ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ): """simple docstring""" with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, lines=UpperCamelCase__, orient=UpperCamelCase__, num_proc=2 ).write() buffer.seek(0 ) lowerCAmelCase_ = load_json(UpperCamelCase__ ) assert isinstance(UpperCamelCase__, UpperCamelCase__ ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(UpperCamelCase__, '''keys''' ) and not hasattr(exported_content[0], '''keys''' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(UpperCamelCase__ ) == 10 def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ): """simple docstring""" with pytest.raises(UpperCamelCase__ ): with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase__, 
UpperCamelCase__, num_proc=0 ) @pytest.mark.parametrize('''compression, extension''', [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ): """simple docstring""" lowerCAmelCase_ = tmp_path_factory.mktemp('''data''' ) / f"test.json.{extension}" lowerCAmelCase_ = str(shared_datadir / f"test_file.json.{extension}" ) JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, compression=UpperCamelCase__ ).write() with fsspec.open(UpperCamelCase__, '''rb''', compression='''infer''' ) as f: lowerCAmelCase_ = f.read() with fsspec.open(UpperCamelCase__, '''rb''', compression='''infer''' ) as f: lowerCAmelCase_ = f.read() assert exported_content == original_content
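# Hedged round-trip sketch with the JSON I/O classes exercised above (the file
# name is a placeholder):
#
#     >>> from datasets import Dataset
#     >>> from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
#     >>> ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
#     >>> _ = JsonDatasetWriter(ds, "out.jsonl", lines=True).write()
#     >>> JsonDatasetReader("out.jsonl").read().column_names
#     ['col_1', 'col_2']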
import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets _A = ''' @inproceedings{xu-etal-2016-optimizing, title = {Optimizing Statistical Machine Translation for Text Simplification}, authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris}, journal = {Transactions of the Association for Computational Linguistics}, volume = {4}, year={2016}, url = {https://www.aclweb.org/anthology/Q16-1029}, pages = {401--415 }, @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' _A = '''\ WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU It can be used to evaluate the quality of machine-generated texts. ''' _A = ''' Calculates sari score (between 0 and 100) given a list of source and predicted sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score. Args: sources: list of source sentences where each sentence should be a string. predictions: list of predicted sentences where each sentence should be a string. references: list of lists of reference sentences where each sentence should be a string. Returns: sari: sari score sacrebleu: sacrebleu score exact: exact score Examples: >>> sources=["About 95 species are currently accepted ."] >>> predictions=["About 95 you now get in ."] >>> references=[["About 95 species are currently known ."]] >>> wiki_split = datasets.load_metric("wiki_split") >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references) >>> print(results) {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0} ''' def __UpperCamelCase ( _A ): def remove_articles(_A ): lowerCAmelCase_ = re.compile(r'''\b(a|an|the)\b''' , re.UNICODE ) return re.sub(_A , ''' ''' , _A ) def white_space_fix(_A ): return " ".join(text.split() ) def remove_punc(_A ): lowerCAmelCase_ = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_A ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_A ) ) ) ) def __UpperCamelCase ( _A , _A ): return int(normalize_answer(_A ) == normalize_answer(_A ) ) def __UpperCamelCase ( _A , _A ): lowerCAmelCase_ = [any(compute_exact(_A , _A ) for ref in refs ) for pred, refs in zip(_A , _A )] return (sum(_A ) / len(_A )) * 100 def __UpperCamelCase ( _A , _A , _A , _A ): lowerCAmelCase_ = [rgram for rgrams in rgramslist for rgram in rgrams] lowerCAmelCase_ = Counter(_A ) lowerCAmelCase_ = Counter(_A ) lowerCAmelCase_ = Counter() for sgram, scount in sgramcounter.items(): lowerCAmelCase_ = scount * numref lowerCAmelCase_ = Counter(_A ) lowerCAmelCase_ = Counter() for cgram, ccount in cgramcounter.items(): lowerCAmelCase_ = ccount * numref # KEEP lowerCAmelCase_ = sgramcounter_rep & cgramcounter_rep lowerCAmelCase_ = keepgramcounter_rep & rgramcounter lowerCAmelCase_ = sgramcounter_rep & rgramcounter lowerCAmelCase_ = 0 lowerCAmelCase_ = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. 
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscorea += keepgramcountergood_rep[keepgram]

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    lowerCAmelCase_ = 1
    lowerCAmelCase_ = 1
    if len(_A ) > 0:
        lowerCAmelCase_ = keeptmpscorea / len(_A )
    if len(_A ) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        lowerCAmelCase_ = keeptmpscorea / sum(keepgramcounterall_rep.values() )
    lowerCAmelCase_ = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        lowerCAmelCase_ = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    lowerCAmelCase_ = sgramcounter_rep - cgramcounter_rep
    lowerCAmelCase_ = delgramcounter_rep - rgramcounter
    lowerCAmelCase_ = sgramcounter_rep - rgramcounter
    lowerCAmelCase_ = 0
    lowerCAmelCase_ = 0
    for delgram in delgramcountergood_rep:
        deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    lowerCAmelCase_ = 1
    if len(_A ) > 0:
        lowerCAmelCase_ = deltmpscorea / len(_A )

    # ADDITION
    lowerCAmelCase_ = set(_A ) - set(_A )
    lowerCAmelCase_ = set(_A ) & set(_A )
    lowerCAmelCase_ = set(_A ) - set(_A )
    lowerCAmelCase_ = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    lowerCAmelCase_ = 1
    lowerCAmelCase_ = 1
    if len(_A ) > 0:
        lowerCAmelCase_ = addtmpscore / len(_A )
    if len(_A ) > 0:
        lowerCAmelCase_ = addtmpscore / len(_A )
    lowerCAmelCase_ = 0
    if addscore_precision > 0 or addscore_recall > 0:
        lowerCAmelCase_ = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)


def __UpperCamelCase ( _A , _A , _A ):
    lowerCAmelCase_ = len(_A )
    lowerCAmelCase_ = ssent.split(''' ''' )
    lowerCAmelCase_ = csent.split(''' ''' )
    lowerCAmelCase_ = []
    lowerCAmelCase_ = []
    lowerCAmelCase_ = []
    lowerCAmelCase_ = []
    lowerCAmelCase_ = []
    lowerCAmelCase_ = []
    lowerCAmelCase_ = []
    lowerCAmelCase_ = []
    lowerCAmelCase_ = []
    lowerCAmelCase_ = []

    for rsent in rsents:
        lowerCAmelCase_ = rsent.split(''' ''' )
        lowerCAmelCase_ = []
        lowerCAmelCase_ = []
        lowerCAmelCase_ = []
        ragramslist.append(_A )
        for i in range(0 , len(_A ) - 1 ):
            if i < len(_A ) - 1:
                lowerCAmelCase_ = ragrams[i] + ''' ''' + ragrams[i + 1]
                ragrams.append(_A )
            if i < len(_A ) - 2:
                lowerCAmelCase_ = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2]
                ragrams.append(_A )
            if i < len(_A ) - 3:
                lowerCAmelCase_ = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3]
                ragrams.append(_A )
        ragramslist.append(_A )
        ragramslist.append(_A )
        ragramslist.append(_A )

    for i in range(0 , len(_A ) - 1 ):
        if i < len(_A ) - 1:
            lowerCAmelCase_ = sagrams[i] + ''' ''' + sagrams[i + 1]
            sagrams.append(_A )
        if i < len(_A ) - 2:
            lowerCAmelCase_ = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2]
            sagrams.append(_A )
        if i < len(_A ) - 3:
            lowerCAmelCase_ = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3]
            sagrams.append(_A )

    for i in range(0 , len(_A ) - 1 ):
        if i < len(_A ) - 1:
            lowerCAmelCase_ = cagrams[i] + ''' ''' + cagrams[i + 1]
            cagrams.append(_A )
        if i < len(_A ) - 2:
            lowerCAmelCase_ = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2]
            cagrams.append(_A )
        if i < len(_A ) - 3:
            lowerCAmelCase_ = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3]
            cagrams.append(_A )

    ((lowerCAmelCase_) , (lowerCAmelCase_) , (lowerCAmelCase_)) = SARIngram(_A , _A , _A , _A )
    ((lowerCAmelCase_) , (lowerCAmelCase_) , (lowerCAmelCase_)) = SARIngram(_A , _A , _A , _A )
    ((lowerCAmelCase_) , (lowerCAmelCase_) , (lowerCAmelCase_)) = SARIngram(_A , _A , _A , _A )
    ((lowerCAmelCase_) , (lowerCAmelCase_) , (lowerCAmelCase_)) = SARIngram(_A , _A , _A , _A )

    lowerCAmelCase_ = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
    lowerCAmelCase_ = sum([delascore, delascore, delascore, delascore] ) / 4
    lowerCAmelCase_ = sum([addascore, addascore, addascore, addascore] ) / 4
    lowerCAmelCase_ = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore


def __UpperCamelCase ( _A , _A = True , _A = "13a" , _A = True ):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though the Wiki-Auto and TURK datasets
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        lowerCAmelCase_ = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__ ).major >= 2:
            lowerCAmelCase_ = sacrebleu.metrics.bleu._get_tokenizer(_A )()(_A )
        else:
            lowerCAmelCase_ = sacrebleu.TOKENIZERS[tokenizer]()(_A )
    elif tokenizer == "moses":
        lowerCAmelCase_ = sacremoses.MosesTokenizer().tokenize(_A , return_str=_A , escape=_A )
    elif tokenizer == "penn":
        lowerCAmelCase_ = sacremoses.MosesTokenizer().penn_tokenize(_A , return_str=_A )
    else:
        lowerCAmelCase_ = sentence

    if not return_str:
        lowerCAmelCase_ = normalized_sent.split()

    return normalized_sent


def __UpperCamelCase ( _A , _A , _A ):
    if not (len(_A ) == len(_A ) == len(_A )):
        raise ValueError('''Sources length must match predictions and references lengths.''' )
    lowerCAmelCase_ = 0
    for src, pred, refs in zip(_A , _A , _A ):
        sari_score += SARIsent(normalize(_A ) , normalize(_A ) , [normalize(_A ) for sent in refs] )
    lowerCAmelCase_ = sari_score / len(_A )
    return 100 * sari_score


def __UpperCamelCase ( _A , _A , _A="exp" , _A=None , _A=False , _A=False , _A=False , ):
    lowerCAmelCase_ = len(references[0] )
    if any(len(_A ) != references_per_prediction for refs in references ):
        raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
    lowerCAmelCase_ = [[refs[i] for refs in references] for i in range(_A )]
    lowerCAmelCase_ = sacrebleu.corpus_bleu(
        _A ,
        _A ,
        smooth_method=_A ,
        smooth_value=_A ,
        force=_A ,
        lowercase=_A ,
        use_effective_order=_A ,
    )
    return output.score


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''', id='''sequence''' ),
                    '''references''': datasets.Sequence(datasets.Value('''string''', id='''sequence''' ), id='''references''' ),
                }
            ),
            codebase_urls=[
                '''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
                '''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
                '''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
                '''https://github.com/mjpost/sacreBLEU''',
            ],
            reference_urls=[
                '''https://www.aclweb.org/anthology/Q16-1029.pdf''',
                '''https://github.com/mjpost/sacreBLEU''',
                '''https://en.wikipedia.org/wiki/BLEU''',
                '''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
            ],
        )

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
        """simple docstring"""
        lowerCAmelCase_ = {}
        result.update({'''sari''': compute_sari(sources=UpperCamelCase__, predictions=UpperCamelCase__, references=UpperCamelCase__ )} )
        result.update({'''sacrebleu''': compute_sacrebleu(predictions=UpperCamelCase__, references=UpperCamelCase__ )} )
        result.update({'''exact''': compute_em(predictions=UpperCamelCase__, references=UpperCamelCase__ )} )
        return result
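A minimal usage sketch for the metric above, assuming the de-obfuscated helper name compute_sari (the name the _compute method itself calls); the toy sentences are illustrative, not from the source:

# Hedged example: `compute_sari` is the corpus-level helper defined above.
sources = ["About 95 species are currently accepted."]
predictions = ["About 95 you now get in."]
references = [["About 95 species are currently known."]]
print(compute_sari(sources, predictions, references))  # corpus-average SARI, scaled to 0-100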
278
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union

import torch

from ..utils import BaseOutput


_A = '''scheduler_config.json'''


class A ( __UpperCAmelCase ):
    __snake_case = 1
    __snake_case = 2
    __snake_case = 3
    __snake_case = 4
    __snake_case = 5
    __snake_case = 6
    __snake_case = 7
    __snake_case = 8
    __snake_case = 9
    __snake_case = 10
    __snake_case = 11
    __snake_case = 12
    __snake_case = 13
    __snake_case = 14


@dataclass
class A ( __UpperCAmelCase ):
    __snake_case = 42


class A :
    __snake_case = SCHEDULER_CONFIG_NAME
    __snake_case = []
    __snake_case = True

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls, UpperCamelCase__ = None, UpperCamelCase__ = None, UpperCamelCase__=False, **UpperCamelCase__, ):
        """simple docstring"""
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = cls.load_config(
            pretrained_model_name_or_path=UpperCamelCase__,
            subfolder=UpperCamelCase__,
            return_unused_kwargs=UpperCamelCase__,
            return_commit_hash=UpperCamelCase__,
            **UpperCamelCase__,
        )
        return cls.from_config(UpperCamelCase__, return_unused_kwargs=UpperCamelCase__, **UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = False, **UpperCamelCase__ ):
        """simple docstring"""
        self.save_config(save_directory=UpperCamelCase__, push_to_hub=UpperCamelCase__, **UpperCamelCase__ )

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        return self._get_compatibles()

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls ):
        """simple docstring"""
        lowerCAmelCase_ = list(set([cls.__name__] + cls._compatibles ) )
        lowerCAmelCase_ = importlib.import_module(__name__.split('''.''' )[0] )
        lowerCAmelCase_ = [
            getattr(UpperCamelCase__, UpperCamelCase__ ) for c in compatible_classes_str if hasattr(UpperCamelCase__, UpperCamelCase__ )
        ]
        return compatible_classes
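A minimal usage sketch of the mixin above, assuming it is diffusers' SchedulerMixin and that a concrete subclass such as DDPMScheduler is available (the repo id is illustrative):

from diffusers import DDPMScheduler

scheduler = DDPMScheduler.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="scheduler")
print(scheduler.compatibles)          # scheduler classes that can be swapped in via from_config
scheduler.save_pretrained("./sched")  # writes scheduler_config.json to the target directory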
278
1
from __future__ import annotations

from collections.abc import Callable
from typing import Generic, TypeVar

_A = TypeVar('''T''')
_A = TypeVar('''U''')


class A ( Generic[T, U] ):
    def __init__( self, UpperCamelCase__, UpperCamelCase__ ):
        """simple docstring"""
        lowerCAmelCase_ = key
        lowerCAmelCase_ = val
        lowerCAmelCase_ = None
        lowerCAmelCase_ = None

    def __repr__( self ):
        """simple docstring"""
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next )}, has prev: {bool(self.prev )}"
        )


class A ( Generic[T, U] ):
    def __init__( self ):
        """simple docstring"""
        lowerCAmelCase_ = DoubleLinkedListNode(UpperCamelCase__, UpperCamelCase__ )
        lowerCAmelCase_ = DoubleLinkedListNode(UpperCamelCase__, UpperCamelCase__ )
        lowerCAmelCase_ , lowerCAmelCase_ = self.rear, self.head

    def __repr__( self ):
        """simple docstring"""
        lowerCAmelCase_ = ['''DoubleLinkedList''']
        lowerCAmelCase_ = self.head
        while node.next is not None:
            rep.append(str(UpperCamelCase__ ) )
            lowerCAmelCase_ = node.next
        rep.append(str(self.rear ) )
        return ",\n ".join(UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
        """simple docstring"""
        lowerCAmelCase_ = self.rear.prev

        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        lowerCAmelCase_ = node
        lowerCAmelCase_ = previous
        lowerCAmelCase_ = node
        lowerCAmelCase_ = self.rear

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
        """simple docstring"""
        if node.prev is None or node.next is None:
            return None

        lowerCAmelCase_ = node.next
        lowerCAmelCase_ = node.prev
        lowerCAmelCase_ = None
        lowerCAmelCase_ = None
        return node


class A ( Generic[T, U] ):
    __snake_case = {}

    def __init__( self, UpperCamelCase__ ):
        """simple docstring"""
        lowerCAmelCase_ = DoubleLinkedList()
        lowerCAmelCase_ = capacity
        lowerCAmelCase_ = 0
        lowerCAmelCase_ = 0
        lowerCAmelCase_ = 0
        lowerCAmelCase_ = {}

    def __repr__( self ):
        """simple docstring"""
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__( self, UpperCamelCase__ ):
        """simple docstring"""
        return key in self.cache

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
        """simple docstring"""
        if key in self.cache:
            self.hits += 1
            lowerCAmelCase_ = self.cache[key]
            lowerCAmelCase_ = self.list.remove(self.cache[key] )
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(UpperCamelCase__ )
            return node.val
        self.miss += 1
        return None

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ ):
        """simple docstring"""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                lowerCAmelCase_ = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(UpperCamelCase__ ) is not None
                )  # node guaranteed to be in list
                assert node.key is not None

                del self.cache[first_node.key]
                self.num_keys -= 1
            lowerCAmelCase_ = DoubleLinkedListNode(UpperCamelCase__, UpperCamelCase__ )
            self.list.add(self.cache[key] )
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            lowerCAmelCase_ = self.list.remove(self.cache[key] )
            assert node is not None  # node guaranteed to be in list
            lowerCAmelCase_ = value
            self.list.add(UpperCamelCase__ )

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls, UpperCamelCase__ = 128 ):
        """simple docstring"""

        def cache_decorator_inner(UpperCamelCase__ ) -> Callable[..., U]:
            def cache_decorator_wrapper(*UpperCamelCase__ ) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    lowerCAmelCase_ = LRUCache(UpperCamelCase__ )

                lowerCAmelCase_ = cls.decorator_function_to_instance_map[func].get(args[0] )
                if result is None:
                    lowerCAmelCase_ = func(*UpperCamelCase__ )
                    cls.decorator_function_to_instance_map[func].put(args[0], UpperCamelCase__ )
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(UpperCamelCase__, '''cache_info''', UpperCamelCase__ )  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner


if __name__ == "__main__":
    import doctest

    doctest.testmod()
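A minimal usage sketch of the decorator factory at the end of the class, assuming the de-obfuscated names LRUCache (which the decorator body itself references) and decorator for the classmethod:

@LRUCache.decorator(100)  # hypothetical restored method name for the classmethod above
def fib(num: int) -> int:
    if num in (1, 2):
        return 1
    return fib(num - 1) + fib(num - 2)

print(fib(30))           # 832040; repeated arguments are served from the cache
print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, current size=...)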
278
import unittest

import numpy as np
import torch

from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class A ( unittest.TestCase ):
    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        lowerCAmelCase_ = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=('''DownBlock2D''', '''AttnDownBlock2D'''),
            up_block_types=('''AttnUpBlock2D''', '''UpBlock2D'''),
        )
        return model

    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        lowerCAmelCase_ = self.dummy_uncond_unet
        lowerCAmelCase_ = KarrasVeScheduler()

        lowerCAmelCase_ = KarrasVePipeline(unet=UpperCamelCase__, scheduler=UpperCamelCase__ )
        pipe.to(UpperCamelCase__ )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )

        lowerCAmelCase_ = torch.manual_seed(0 )
        lowerCAmelCase_ = pipe(num_inference_steps=2, generator=UpperCamelCase__, output_type='''numpy''' ).images

        lowerCAmelCase_ = torch.manual_seed(0 )
        lowerCAmelCase_ = pipe(num_inference_steps=2, generator=UpperCamelCase__, output_type='''numpy''', return_dict=UpperCamelCase__ )[0]

        lowerCAmelCase_ = image[0, -3:, -3:, -1]
        lowerCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        lowerCAmelCase_ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2


@slow
@require_torch
class A ( unittest.TestCase ):
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        lowerCAmelCase_ = '''google/ncsnpp-celebahq-256'''
        lowerCAmelCase_ = UNetaDModel.from_pretrained(UpperCamelCase__ )
        lowerCAmelCase_ = KarrasVeScheduler()

        lowerCAmelCase_ = KarrasVePipeline(unet=UpperCamelCase__, scheduler=UpperCamelCase__ )
        pipe.to(UpperCamelCase__ )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )

        lowerCAmelCase_ = torch.manual_seed(0 )
        lowerCAmelCase_ = pipe(num_inference_steps=20, generator=UpperCamelCase__, output_type='''numpy''' ).images

        lowerCAmelCase_ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        lowerCAmelCase_ = np.array([0.578, 0.5_811, 0.5_924, 0.5_809, 0.587, 0.5_886, 0.5_861, 0.5_802, 0.586] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
278
1
from .imports import is_rich_available


if is_rich_available():
    from rich.traceback import install

    install(show_locals=False)

else:
    raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
278
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
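A minimal usage sketch of the re-exported pipeline, assuming this is diffusers' unclip subpackage; the checkpoint id and device are assumptions, not from the source:

import torch
from diffusers import UnCLIPPipeline

pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha", torch_dtype=torch.float16)  # assumed checkpoint
pipe = pipe.to("cuda")
image = pipe("a photo of a red panda").images[0]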
278
1
def __UpperCamelCase ( _A ):
    return " ".join(
        ''''''.join(word[::-1] ) if len(_A ) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words('''Hey wollef sroirraw'''))
278
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def __UpperCamelCase ( _A = 3 ):
    if isinstance(_A , str ):
        raise TypeError('''number of qubits must be an integer.''' )
    if number_of_qubits <= 0:
        raise ValueError('''number of qubits must be > 0.''' )
    if math.floor(_A ) != number_of_qubits:
        raise ValueError('''number of qubits must be exact integer.''' )
    if number_of_qubits > 10:
        raise ValueError('''number of qubits too large to simulate(>10).''' )

    lowerCAmelCase_ = QuantumRegister(_A , '''qr''' )
    lowerCAmelCase_ = ClassicalRegister(_A , '''cr''' )

    lowerCAmelCase_ = QuantumCircuit(_A , _A )

    lowerCAmelCase_ = number_of_qubits

    for i in range(_A ):
        quantum_circuit.h(number_of_qubits - i - 1 )
        counter -= 1
        for j in range(_A ):
            quantum_circuit.cp(np.pi / 2 ** (counter - j) , _A , _A )

    for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(_A , number_of_qubits - k - 1 )

    # measure all the qubits
    quantum_circuit.measure(_A , _A )
    # simulate with 10000 shots
    lowerCAmelCase_ = Aer.get_backend('''qasm_simulator''' )
    lowerCAmelCase_ = execute(_A , _A , shots=10000 )

    return job.result().get_counts(_A )


if __name__ == "__main__":
    print(
        f"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
    )
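A quick sanity check one could run against the function above: the QFT of the all-zeros basis state is a uniform superposition, so the 2**n measurement outcomes should be roughly equally frequent (the tolerance below is a loose statistical assumption):

counts = quantum_fourier_transform(3)  # name taken from the __main__ block above
assert sum(counts.values()) == 10000   # total number of shots
assert all(abs(v - 1250) < 200 for v in counts.values())  # ~uniform over the 8 outcomes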
278
1
from __future__ import annotations

import requests

_A = set(
    '''approved_at_utc approved_by author_flair_background_color author_flair_css_class
author_flair_richtext author_flair_template_id author_fullname author_premium
can_mod_post category clicked content_categories created_utc downs edited gilded
gildings hidden hide_score is_created_from_ads_ui is_meta is_original_content
is_reddit_media_domain is_video link_flair_css_class link_flair_richtext
link_flair_text link_flair_text_color media_embed mod_reason_title name permalink
pwls quarantine saved score secure_media secure_media_embed selftext subreddit
subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports'''.split()
)


def __UpperCamelCase ( _A , _A = 1 , _A = "new" , _A = None ):
    lowerCAmelCase_ = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(_A ) - valid_terms ) ):
        lowerCAmelCase_ = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(_A )
    lowerCAmelCase_ = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}" ,
        headers={'''User-agent''': '''A random string'''} ,
    )
    if response.status_code == 429:
        raise requests.HTTPError

    lowerCAmelCase_ = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(_A )}

    lowerCAmelCase_ = {}
    for id_ in range(_A ):
        lowerCAmelCase_ = {
            item: data['''data''']['''children'''][id_]['''data'''][item] for item in wanted_data
        }
    return data_dict


if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try after some time
    print(get_subreddit_data('''learnpython''', wanted_data=['''title''', '''url''', '''selftext''']))
278
from functools import lru_cache


@lru_cache
def __UpperCamelCase ( _A ):
    if num < 0:
        raise ValueError('''Number should not be negative.''' )

    return 1 if num in (0, 1) else num * factorial(num - 1 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
278
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


_A = logging.get_logger(__name__)

_A = {
    '''google/pegasus-large''': '''https://huggingface.co/google/pegasus-large/resolve/main/config.json''',
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class A ( __UpperCAmelCase ):
    __snake_case = 'pegasus'
    __snake_case = ['past_key_values']
    __snake_case = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(
        self,
        UpperCamelCase__=5_0265,
        UpperCamelCase__=1024,
        UpperCamelCase__=12,
        UpperCamelCase__=4096,
        UpperCamelCase__=16,
        UpperCamelCase__=12,
        UpperCamelCase__=4096,
        UpperCamelCase__=16,
        UpperCamelCase__=0.0,
        UpperCamelCase__=0.0,
        UpperCamelCase__=True,
        UpperCamelCase__=True,
        UpperCamelCase__="gelu",
        UpperCamelCase__=1024,
        UpperCamelCase__=0.1,
        UpperCamelCase__=0.0,
        UpperCamelCase__=0.0,
        UpperCamelCase__=0.02,
        UpperCamelCase__=0,
        UpperCamelCase__=False,
        UpperCamelCase__=0,
        UpperCamelCase__=1,
        UpperCamelCase__=1,
        **UpperCamelCase__,
    ):
        """simple docstring"""
        lowerCAmelCase_ = vocab_size
        lowerCAmelCase_ = max_position_embeddings
        lowerCAmelCase_ = d_model
        lowerCAmelCase_ = encoder_ffn_dim
        lowerCAmelCase_ = encoder_layers
        lowerCAmelCase_ = encoder_attention_heads
        lowerCAmelCase_ = decoder_ffn_dim
        lowerCAmelCase_ = decoder_layers
        lowerCAmelCase_ = decoder_attention_heads
        lowerCAmelCase_ = dropout
        lowerCAmelCase_ = attention_dropout
        lowerCAmelCase_ = activation_dropout
        lowerCAmelCase_ = activation_function
        lowerCAmelCase_ = init_std
        lowerCAmelCase_ = encoder_layerdrop
        lowerCAmelCase_ = decoder_layerdrop
        lowerCAmelCase_ = use_cache
        lowerCAmelCase_ = encoder_layers
        lowerCAmelCase_ = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=UpperCamelCase__,
            eos_token_id=UpperCamelCase__,
            is_encoder_decoder=UpperCamelCase__,
            decoder_start_token_id=UpperCamelCase__,
            forced_eos_token_id=UpperCamelCase__,
            **UpperCamelCase__,
        )

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        return self.encoder_attention_heads

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        return self.d_model
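A minimal usage sketch, assuming the class above is transformers' PegasusConfig (de-obfuscated name):

config = PegasusConfig()  # defaults correspond to google/pegasus-large
print(config.d_model, config.encoder_layers)  # 1024 12
print(config.num_attention_heads)             # 16, resolved via the attribute_map above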
278
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def __UpperCamelCase ( _A ): lowerCAmelCase_ = 384 lowerCAmelCase_ = 7 if "tiny" in model_name: lowerCAmelCase_ = 96 lowerCAmelCase_ = (2, 2, 6, 2) lowerCAmelCase_ = (3, 6, 12, 24) elif "small" in model_name: lowerCAmelCase_ = 96 lowerCAmelCase_ = (2, 2, 18, 2) lowerCAmelCase_ = (3, 6, 12, 24) elif "base" in model_name: lowerCAmelCase_ = 128 lowerCAmelCase_ = (2, 2, 18, 2) lowerCAmelCase_ = (4, 8, 16, 32) lowerCAmelCase_ = 12 lowerCAmelCase_ = 512 elif "large" in model_name: lowerCAmelCase_ = 192 lowerCAmelCase_ = (2, 2, 18, 2) lowerCAmelCase_ = (6, 12, 24, 48) lowerCAmelCase_ = 12 lowerCAmelCase_ = 768 # set label information lowerCAmelCase_ = 150 lowerCAmelCase_ = '''huggingface/label-files''' lowerCAmelCase_ = '''ade20k-id2label.json''' lowerCAmelCase_ = json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) ) lowerCAmelCase_ = {int(_A ): v for k, v in idalabel.items()} lowerCAmelCase_ = {v: k for k, v in idalabel.items()} lowerCAmelCase_ = SwinConfig( embed_dim=_A , depths=_A , num_heads=_A , window_size=_A , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , ) lowerCAmelCase_ = UperNetConfig( backbone_config=_A , auxiliary_in_channels=_A , num_labels=_A , idalabel=_A , labelaid=_A , ) return config def __UpperCamelCase ( _A ): lowerCAmelCase_ = [] # fmt: off # stem rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight''') ) rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias''') ) rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight''') ) rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias''') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") ) 
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias") ) if i < 3: rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight") ) rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight") ) rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias") ) rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight") ) rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias") ) # decode head rename_keys.extend( [ ('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''), ('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''), ('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''), ('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''), ] ) # fmt: on return rename_keys def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = dct.pop(_A ) lowerCAmelCase_ = val def __UpperCamelCase ( _A , _A ): lowerCAmelCase_ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): lowerCAmelCase_ = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) lowerCAmelCase_ = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight" ) lowerCAmelCase_ = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict lowerCAmelCase_ = in_proj_weight[:dim, :] lowerCAmelCase_ = in_proj_bias[: dim] lowerCAmelCase_ = in_proj_weight[ dim : dim * 2, : ] lowerCAmelCase_ = in_proj_bias[ dim : dim * 2 ] lowerCAmelCase_ = in_proj_weight[ -dim :, : ] lowerCAmelCase_ = in_proj_bias[-dim :] # fmt: on def __UpperCamelCase ( _A ): lowerCAmelCase_ , lowerCAmelCase_ = x.shape lowerCAmelCase_ = x.reshape(_A , 4 , in_channel // 4 ) lowerCAmelCase_ = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(_A , _A ) return x def __UpperCamelCase ( _A ): lowerCAmelCase_ , lowerCAmelCase_ = x.shape lowerCAmelCase_ = x.reshape(_A , in_channel // 4 , 4 ) lowerCAmelCase_ = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(_A , _A ) return x def __UpperCamelCase ( _A ): lowerCAmelCase_ = x.shape[0] lowerCAmelCase_ = x.reshape(4 , in_channel // 4 ) lowerCAmelCase_ = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(_A ) return x def __UpperCamelCase ( _A ): lowerCAmelCase_ = x.shape[0] lowerCAmelCase_ = x.reshape(in_channel // 4 , 4 ) lowerCAmelCase_ = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(_A ) return x def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = { '''upernet-swin-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth''', '''upernet-swin-small''': 
'''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth''', '''upernet-swin-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth''', '''upernet-swin-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth''', } lowerCAmelCase_ = model_name_to_url[model_name] lowerCAmelCase_ = torch.hub.load_state_dict_from_url(_A , map_location='''cpu''' , file_name=_A )[ '''state_dict''' ] for name, param in state_dict.items(): print(_A , param.shape ) lowerCAmelCase_ = get_upernet_config(_A ) lowerCAmelCase_ = UperNetForSemanticSegmentation(_A ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): lowerCAmelCase_ = state_dict.pop(_A ) if "bn" in key: lowerCAmelCase_ = key.replace('''bn''' , '''batch_norm''' ) lowerCAmelCase_ = val # rename keys lowerCAmelCase_ = create_rename_keys(_A ) for src, dest in rename_keys: rename_key(_A , _A , _A ) read_in_q_k_v(_A , config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: lowerCAmelCase_ = reverse_correct_unfold_reduction_order(_A ) if "norm" in key: lowerCAmelCase_ = reverse_correct_unfold_norm_order(_A ) model.load_state_dict(_A ) # verify on image lowerCAmelCase_ = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg''' lowerCAmelCase_ = Image.open(requests.get(_A , stream=_A ).raw ).convert('''RGB''' ) lowerCAmelCase_ = SegformerImageProcessor() lowerCAmelCase_ = processor(_A , return_tensors='''pt''' ).pixel_values with torch.no_grad(): lowerCAmelCase_ = model(_A ) lowerCAmelCase_ = outputs.logits print(logits.shape ) print('''First values of logits:''' , logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": lowerCAmelCase_ = torch.tensor( [[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] ) elif model_name == "upernet-swin-small": lowerCAmelCase_ = torch.tensor( [[-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.0_9_0_8, -7.0_9_0_8, -6.8_5_3_4]] ) elif model_name == "upernet-swin-base": lowerCAmelCase_ = torch.tensor( [[-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.4_7_6_3, -6.4_7_6_3, -6.3_2_5_4]] ) elif model_name == "upernet-swin-large": lowerCAmelCase_ = torch.tensor( [[-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.4_0_4_4, -7.4_0_4_4, -7.2_5_8_6]] ) print('''Logits:''' , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , _A , atol=1E-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(f"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_A ) print(f"Saving processor to {pytorch_dump_folder_path}" ) processor.save_pretrained(_A ) if push_to_hub: print(f"Pushing model and processor for {model_name} to hub" ) model.push_to_hub(f"openmmlab/{model_name}" ) 
processor.push_to_hub(f"openmmlab/{model_name}" ) if __name__ == "__main__": _A = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''upernet-swin-tiny''', type=str, choices=[f"upernet-swin-{size}" for size in ['''tiny''', '''small''', '''base''', '''large''']], help='''Name of the Swin + UperNet model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) _A = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
278
1
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


_A = logging.get_logger(__name__)

_A = {
    '''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''',
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class A ( __UpperCAmelCase ):
    __snake_case = 'vit'

    def __init__(
        self,
        UpperCamelCase__=768,
        UpperCamelCase__=12,
        UpperCamelCase__=12,
        UpperCamelCase__=3072,
        UpperCamelCase__="gelu",
        UpperCamelCase__=0.0,
        UpperCamelCase__=0.0,
        UpperCamelCase__=0.02,
        UpperCamelCase__=1E-12,
        UpperCamelCase__=224,
        UpperCamelCase__=16,
        UpperCamelCase__=3,
        UpperCamelCase__=True,
        UpperCamelCase__=16,
        **UpperCamelCase__,
    ):
        """simple docstring"""
        super().__init__(**UpperCamelCase__ )

        lowerCAmelCase_ = hidden_size
        lowerCAmelCase_ = num_hidden_layers
        lowerCAmelCase_ = num_attention_heads
        lowerCAmelCase_ = intermediate_size
        lowerCAmelCase_ = hidden_act
        lowerCAmelCase_ = hidden_dropout_prob
        lowerCAmelCase_ = attention_probs_dropout_prob
        lowerCAmelCase_ = initializer_range
        lowerCAmelCase_ = layer_norm_eps
        lowerCAmelCase_ = image_size
        lowerCAmelCase_ = patch_size
        lowerCAmelCase_ = num_channels
        lowerCAmelCase_ = qkv_bias
        lowerCAmelCase_ = encoder_stride


class A ( __UpperCAmelCase ):
    __snake_case = version.parse('1.11' )

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ]
        )

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        return 1E-4
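A minimal usage sketch, assuming the two classes above are transformers' ViTConfig and ViTOnnxConfig (de-obfuscated names, including the `inputs` property):

config = ViTConfig()  # defaults: hidden_size=768, image_size=224, patch_size=16
onnx_config = ViTOnnxConfig(config)
print(dict(onnx_config.inputs))  # {'pixel_values': {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}}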
278
import argparse
import re
from typing import Dict

import torch
from datasets import Audio, Dataset, load_dataset, load_metric

from transformers import AutoFeatureExtractor, pipeline


def __UpperCamelCase ( _A , _A ):
    lowerCAmelCase_ = args.log_outputs
    lowerCAmelCase_ = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )

    # load metric
    lowerCAmelCase_ = load_metric('''wer''' )
    lowerCAmelCase_ = load_metric('''cer''' )

    # compute metrics
    lowerCAmelCase_ = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
    lowerCAmelCase_ = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )

    # print & log results
    lowerCAmelCase_ = f"WER: {wer_result}\nCER: {cer_result}"
    print(_A )
    with open(f"{dataset_id}_eval_results.txt" , '''w''' ) as f:
        f.write(_A )

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        lowerCAmelCase_ = f"log_{dataset_id}_predictions.txt"
        lowerCAmelCase_ = f"log_{dataset_id}_targets.txt"

        with open(_A , '''w''' ) as p, open(_A , '''w''' ) as t:
            # mapping function to write output
            def write_to_file(_A , _A ):
                p.write(f"{i}" + '''\n''' )
                p.write(batch['''prediction'''] + '''\n''' )
                t.write(f"{i}" + '''\n''' )
                t.write(batch['''target'''] + '''\n''' )

            result.map(_A , with_indices=_A )


def __UpperCamelCase ( _A ):
    lowerCAmelCase_ = '''[,?.!\-\;\:"“%‘”�—’…–]'''  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    lowerCAmelCase_ = re.sub(_A , '''''' , text.lower() )

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    lowerCAmelCase_ = ['''\n\n''', '''\n''', ''' ''', ''' ''']
    for t in token_sequences_to_ignore:
        lowerCAmelCase_ = ''' '''.join(text.split(_A ) )

    return text


def __UpperCamelCase ( _A ):
    # load dataset
    lowerCAmelCase_ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=_A )

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained(args.model_id )
    lowerCAmelCase_ = feature_extractor.sampling_rate

    # resample audio
    lowerCAmelCase_ = dataset.cast_column('''audio''' , Audio(sampling_rate=_A ) )

    # load eval pipeline
    if args.device is None:
        lowerCAmelCase_ = 0 if torch.cuda.is_available() else -1
    lowerCAmelCase_ = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )

    # map function to decode audio
    def map_to_pred(_A ):
        lowerCAmelCase_ = asr(
            batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s
        )

        lowerCAmelCase_ = prediction['''text''']
        lowerCAmelCase_ = normalize_text(batch['''sentence'''] )
        return batch

    # run inference on all examples
    lowerCAmelCase_ = dataset.map(_A , remove_columns=dataset.column_names )

    # compute and log_results
    # do not change function below
    log_results(_A , _A )


if __name__ == "__main__":
    _A = argparse.ArgumentParser()

    parser.add_argument(
        '''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
    )
    parser.add_argument(
        '''--dataset''',
        type=str,
        required=True,
        help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
    )
    parser.add_argument(
        '''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
    )
    parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
    parser.add_argument(
        '''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
    )
    parser.add_argument(
        '''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
    )
    parser.add_argument(
        '''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
    )
    parser.add_argument(
        '''--device''',
        type=int,
        default=None,
        help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
    )
    _A = parser.parse_args()

    main(args)
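An illustrative invocation of the evaluation script above; the script filename and the model and dataset identifiers are assumptions, not from the source:

# python eval.py --model_id hf-test/xls-r-dummy \
#     --dataset mozilla-foundation/common_voice_8_0 --config en --split test \
#     --chunk_length_s 5.0 --stride_length_s 1.0 --log_outputs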
278
1
from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class A : __snake_case = LEDConfig __snake_case = {} __snake_case = 'gelu' def __init__( self, UpperCamelCase__, UpperCamelCase__=13, UpperCamelCase__=7, UpperCamelCase__=True, UpperCamelCase__=False, UpperCamelCase__=99, UpperCamelCase__=32, UpperCamelCase__=2, UpperCamelCase__=4, UpperCamelCase__=37, UpperCamelCase__=0.1, UpperCamelCase__=0.1, UpperCamelCase__=20, UpperCamelCase__=2, UpperCamelCase__=1, UpperCamelCase__=0, UpperCamelCase__=4, ): """simple docstring""" lowerCAmelCase_ = parent lowerCAmelCase_ = batch_size lowerCAmelCase_ = seq_length lowerCAmelCase_ = is_training lowerCAmelCase_ = use_labels lowerCAmelCase_ = vocab_size lowerCAmelCase_ = hidden_size lowerCAmelCase_ = num_hidden_layers lowerCAmelCase_ = num_attention_heads lowerCAmelCase_ = intermediate_size lowerCAmelCase_ = hidden_dropout_prob lowerCAmelCase_ = attention_probs_dropout_prob lowerCAmelCase_ = max_position_embeddings lowerCAmelCase_ = eos_token_id lowerCAmelCase_ = pad_token_id lowerCAmelCase_ = bos_token_id lowerCAmelCase_ = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after lowerCAmelCase_ = self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. 
Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests lowerCAmelCase_ = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size ) lowerCAmelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 ) lowerCAmelCase_ = tf.concat([input_ids, eos_tensor], axis=1 ) lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) lowerCAmelCase_ = self.config_cls( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates, ) lowerCAmelCase_ = prepare_led_inputs_dict(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ) lowerCAmelCase_ = tf.concat( [tf.zeros_like(UpperCamelCase__ )[:, :-1], tf.ones_like(UpperCamelCase__ )[:, -1:]], axis=-1, ) lowerCAmelCase_ = global_attention_mask return config, inputs_dict def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ ): """simple docstring""" lowerCAmelCase_ = TFLEDModel(config=UpperCamelCase__ ).get_decoder() lowerCAmelCase_ = inputs_dict['''input_ids'''] lowerCAmelCase_ = input_ids[:1, :] lowerCAmelCase_ = inputs_dict['''attention_mask'''][:1, :] lowerCAmelCase_ = 1 # first forward pass lowerCAmelCase_ = model(UpperCamelCase__, attention_mask=UpperCamelCase__, use_cache=UpperCamelCase__ ) lowerCAmelCase_ , lowerCAmelCase_ = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids lowerCAmelCase_ = ids_tensor((self.batch_size, 3), config.vocab_size ) lowerCAmelCase_ = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta ) # append to next input_ids and lowerCAmelCase_ = tf.concat([input_ids, next_tokens], axis=-1 ) lowerCAmelCase_ = tf.concat([attention_mask, next_attn_mask], axis=-1 ) lowerCAmelCase_ = model(UpperCamelCase__, attention_mask=UpperCamelCase__ )[0] lowerCAmelCase_ = model(UpperCamelCase__, attention_mask=UpperCamelCase__, past_key_values=UpperCamelCase__ )[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] ) # select random slice lowerCAmelCase_ = int(ids_tensor((1,), output_from_past.shape[-1] ) ) lowerCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx] lowerCAmelCase_ = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(UpperCamelCase__, UpperCamelCase__, rtol=1E-3 ) def __UpperCamelCase ( _A , _A , _A , _A=None , _A=None , _A=None , _A=None , ): if attention_mask is None: lowerCAmelCase_ = tf.cast(tf.math.not_equal(_A , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: lowerCAmelCase_ = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: lowerCAmelCase_ = tf.ones((config.encoder_layers, 
config.encoder_attention_heads) ) if decoder_head_mask is None: lowerCAmelCase_ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class A ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): __snake_case = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () __snake_case = (TFLEDForConditionalGeneration,) if is_tf_available() else () __snake_case = ( { 'conversational': TFLEDForConditionalGeneration, 'feature-extraction': TFLEDModel, 'summarization': TFLEDForConditionalGeneration, 'text2text-generation': TFLEDForConditionalGeneration, 'translation': TFLEDForConditionalGeneration, } if is_tf_available() else {} ) __snake_case = True __snake_case = False __snake_case = False __snake_case = False def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = TFLEDModelTester(self ) lowerCAmelCase_ = ConfigTester(self, config_class=UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase_ = tf.zeros_like(inputs_dict['''attention_mask'''] ) lowerCAmelCase_ = 2 lowerCAmelCase_ = tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices, 1, inputs_dict['''global_attention_mask'''], ) lowerCAmelCase_ = True lowerCAmelCase_ = self.model_tester.seq_length lowerCAmelCase_ = self.model_tester.encoder_seq_length def check_decoder_attentions_output(UpperCamelCase__ ): lowerCAmelCase_ = outputs.decoder_attentions self.assertEqual(len(UpperCamelCase__ ), self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_length, seq_length], ) def check_encoder_attentions_output(UpperCamelCase__ ): lowerCAmelCase_ = [t.numpy() for t in outputs.encoder_attentions] lowerCAmelCase_ = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(UpperCamelCase__ ), self.model_tester.num_hidden_layers ) self.assertEqual(len(UpperCamelCase__ ), self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_length, seq_length], ) self.assertListEqual( list(global_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices], ) for model_class in self.all_model_classes: lowerCAmelCase_ = True lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = model_class(UpperCamelCase__ ) lowerCAmelCase_ = model(self._prepare_for_class(UpperCamelCase__, UpperCamelCase__ ) ) lowerCAmelCase_ = len(UpperCamelCase__ ) self.assertEqual(config.output_hidden_states, UpperCamelCase__ ) check_encoder_attentions_output(UpperCamelCase__ ) if self.is_encoder_decoder: lowerCAmelCase_ = model_class(UpperCamelCase__ ) lowerCAmelCase_ = model(self._prepare_for_class(UpperCamelCase__, UpperCamelCase__ ) ) self.assertEqual(config.output_hidden_states, UpperCamelCase__ ) 
check_decoder_attentions_output(UpperCamelCase__ ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] lowerCAmelCase_ = True lowerCAmelCase_ = model_class(UpperCamelCase__ ) lowerCAmelCase_ = model(self._prepare_for_class(UpperCamelCase__, UpperCamelCase__ ) ) self.assertEqual(config.output_hidden_states, UpperCamelCase__ ) check_encoder_attentions_output(UpperCamelCase__ ) # Check attention is always last and order is fine lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = model_class(UpperCamelCase__ ) lowerCAmelCase_ = model(self._prepare_for_class(UpperCamelCase__, UpperCamelCase__ ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(UpperCamelCase__ ) ) self.assertEqual(model.config.output_hidden_states, UpperCamelCase__ ) check_encoder_attentions_output(UpperCamelCase__ ) @unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" pass def __UpperCamelCase ( _A ): return tf.constant(_A , dtype=tf.intaa ) _A = 1e-4 @slow @require_tf class A ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led # change to intended input here lowerCAmelCase_ = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] ) lowerCAmelCase_ = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] ) lowerCAmelCase_ = prepare_led_inputs_dict(model.config, UpperCamelCase__, UpperCamelCase__ ) lowerCAmelCase_ = model(**UpperCamelCase__ )[0] lowerCAmelCase_ = (1, 1024, 768) self.assertEqual(output.shape, UpperCamelCase__ ) # change to expected output here lowerCAmelCase_ = tf.convert_to_tensor( [[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]], ) tf.debugging.assert_near(output[:, :3, :3], UpperCamelCase__, atol=1E-3 ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ) # change to intended input here lowerCAmelCase_ = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] ) lowerCAmelCase_ = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] ) lowerCAmelCase_ = prepare_led_inputs_dict(model.config, UpperCamelCase__, UpperCamelCase__ ) lowerCAmelCase_ = model(**UpperCamelCase__ )[0] lowerCAmelCase_ = (1, 1024, model.config.vocab_size) self.assertEqual(output.shape, UpperCamelCase__ ) # change to expected output here lowerCAmelCase_ = tf.convert_to_tensor( [[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]], ) tf.debugging.assert_near(output[:, :3, :3], UpperCamelCase__, atol=1E-3, rtol=1E-3 )
278
from typing import Dict, List, Optional

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


_A = logging.get_logger(__name__)

_A = {
    '''nielsr/canine-s''': 2_048,
}

# Unicode defines 1,114,112 total “codepoints”
_A = 1_114_112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
_A = 0

_A = 0xe0_00
_A = 0xe0_01
_A = 0xe0_02
_A = 0xe0_03

_A = 0xe0_04

# Maps special codepoints to human-readable names.
_A = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
_A = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}


class A ( __UpperCAmelCase ):
    __snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        UpperCamelCase__=chr(CLS ),
        UpperCamelCase__=chr(SEP ),
        UpperCamelCase__=chr(SEP ),
        UpperCamelCase__=chr(CLS ),
        UpperCamelCase__=chr(PAD ),
        UpperCamelCase__=chr(MASK ),
        UpperCamelCase__=False,
        UpperCamelCase__=2048,
        **UpperCamelCase__,
    ):
        """simple docstring"""
        lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else bos_token
        lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else eos_token
        lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else sep_token
        lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else cls_token
        lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else mask_token

        super().__init__(
            bos_token=UpperCamelCase__,
            eos_token=UpperCamelCase__,
            sep_token=UpperCamelCase__,
            cls_token=UpperCamelCase__,
            pad_token=UpperCamelCase__,
            mask_token=UpperCamelCase__,
            add_prefix_space=UpperCamelCase__,
            model_max_length=UpperCamelCase__,
            **UpperCamelCase__,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        lowerCAmelCase_ = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            lowerCAmelCase_ = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        lowerCAmelCase_ = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        lowerCAmelCase_ = UNICODE_VOCAB_SIZE
        lowerCAmelCase_ = len(self._special_codepoints )

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        return self._unicode_vocab_size

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
        """simple docstring"""
        return list(UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
        """simple docstring"""
        try:
            return ord(UpperCamelCase__ )
        except TypeError:
            raise ValueError(f"invalid token: '{token}'" )

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
        """simple docstring"""
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(UpperCamelCase__ )
        except TypeError:
            raise ValueError(f"invalid id: {index}" )

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
        """simple docstring"""
        return "".join(UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None ):
        """simple docstring"""
        lowerCAmelCase_ = [self.sep_token_id]
        lowerCAmelCase_ = [self.cls_token_id]

        lowerCAmelCase_ = cls + token_ids_a + sep
        if token_ids_a is not None:
            result += token_ids_a + sep
        return result

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None, UpperCamelCase__ = False ):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=UpperCamelCase__, token_ids_a=UpperCamelCase__, already_has_special_tokens=UpperCamelCase__
            )

        lowerCAmelCase_ = [1] + ([0] * len(UpperCamelCase__ )) + [1]
        if token_ids_a is not None:
            result += ([0] * len(UpperCamelCase__ )) + [1]
        return result

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None ):
        """simple docstring"""
        lowerCAmelCase_ = [self.sep_token_id]
        lowerCAmelCase_ = [self.cls_token_id]

        lowerCAmelCase_ = len(cls + token_ids_a + sep ) * [0]
        if token_ids_a is not None:
            result += len(token_ids_a + sep ) * [1]
        return result

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None ):
        """simple docstring"""
        return ()
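A minimal usage sketch of the character-level scheme above, assuming the class is transformers' CanineTokenizer: each character is encoded as its Unicode codepoint, with [CLS]/[SEP] drawn from the private-use codepoints defined at the top of the file:

tokenizer = CanineTokenizer()  # de-obfuscated class name assumed
enc = tokenizer("hi")
print(enc["input_ids"])  # [57344, 104, 105, 57345] == [CLS], ord('h'), ord('i'), [SEP]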
278
1
import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_gpu, require_torch_or_tf, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class A ( unittest.TestCase ): __snake_case = MODEL_FOR_CAUSAL_LM_MAPPING __snake_case = TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = pipeline(task='''text-generation''', model='''sshleifer/tiny-ctrl''', framework='''pt''' ) # Using `do_sample=False` to force deterministic output lowerCAmelCase_ = text_generator('''This is a test''', do_sample=UpperCamelCase__ ) self.assertEqual( UpperCamelCase__, [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ], ) lowerCAmelCase_ = text_generator(['''This is a test''', '''This is a second test'''] ) self.assertEqual( UpperCamelCase__, [ [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ], [ { '''generated_text''': ( '''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy''' ''' oscope. oscope. FiliFili@@''' ) } ], ], ) lowerCAmelCase_ = text_generator('''This is a test''', do_sample=UpperCamelCase__, num_return_sequences=2, return_tensors=UpperCamelCase__ ) self.assertEqual( UpperCamelCase__, [ {'''generated_token_ids''': ANY(UpperCamelCase__ )}, {'''generated_token_ids''': ANY(UpperCamelCase__ )}, ], ) lowerCAmelCase_ = text_generator.model.config.eos_token_id lowerCAmelCase_ = '''<pad>''' lowerCAmelCase_ = text_generator( ['''This is a test''', '''This is a second test'''], do_sample=UpperCamelCase__, num_return_sequences=2, batch_size=2, return_tensors=UpperCamelCase__, ) self.assertEqual( UpperCamelCase__, [ [ {'''generated_token_ids''': ANY(UpperCamelCase__ )}, {'''generated_token_ids''': ANY(UpperCamelCase__ )}, ], [ {'''generated_token_ids''': ANY(UpperCamelCase__ )}, {'''generated_token_ids''': ANY(UpperCamelCase__ )}, ], ], ) @require_tf def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = pipeline(task='''text-generation''', model='''sshleifer/tiny-ctrl''', framework='''tf''' ) # Using `do_sample=False` to force deterministic output lowerCAmelCase_ = text_generator('''This is a test''', do_sample=UpperCamelCase__ ) self.assertEqual( UpperCamelCase__, [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ], ) lowerCAmelCase_ = text_generator(['''This is a test''', '''This is a second test'''], do_sample=UpperCamelCase__ ) self.assertEqual( UpperCamelCase__, [ [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ], [ { '''generated_text''': ( '''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes''' ''' Cannes 閲閲Cannes Cannes Cannes 攵 please,''' ) } ], ], ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ): """simple docstring""" lowerCAmelCase_ = TextGenerationPipeline(model=UpperCamelCase__, tokenizer=UpperCamelCase__ ) return text_generator, ["This is a test", "Another 
test"] def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = '''Hello I believe in''' lowerCAmelCase_ = pipeline('''text-generation''', model='''hf-internal-testing/tiny-random-gpt2''' ) lowerCAmelCase_ = text_generator(UpperCamelCase__ ) self.assertEqual( UpperCamelCase__, [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}], ) lowerCAmelCase_ = text_generator(UpperCamelCase__, stop_sequence=''' fe''' ) self.assertEqual(UpperCamelCase__, [{'''generated_text''': '''Hello I believe in fe'''}] ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ ): """simple docstring""" lowerCAmelCase_ = text_generator.model lowerCAmelCase_ = text_generator.tokenizer lowerCAmelCase_ = text_generator('''This is a test''' ) self.assertEqual(UpperCamelCase__, [{'''generated_text''': ANY(UpperCamelCase__ )}] ) self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) lowerCAmelCase_ = text_generator('''This is a test''', return_full_text=UpperCamelCase__ ) self.assertEqual(UpperCamelCase__, [{'''generated_text''': ANY(UpperCamelCase__ )}] ) self.assertNotIn('''This is a test''', outputs[0]['''generated_text'''] ) lowerCAmelCase_ = pipeline(task='''text-generation''', model=UpperCamelCase__, tokenizer=UpperCamelCase__, return_full_text=UpperCamelCase__ ) lowerCAmelCase_ = text_generator('''This is a test''' ) self.assertEqual(UpperCamelCase__, [{'''generated_text''': ANY(UpperCamelCase__ )}] ) self.assertNotIn('''This is a test''', outputs[0]['''generated_text'''] ) lowerCAmelCase_ = text_generator('''This is a test''', return_full_text=UpperCamelCase__ ) self.assertEqual(UpperCamelCase__, [{'''generated_text''': ANY(UpperCamelCase__ )}] ) self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) lowerCAmelCase_ = text_generator(['''This is great !''', '''Something else'''], num_return_sequences=2, do_sample=UpperCamelCase__ ) self.assertEqual( UpperCamelCase__, [ [{'''generated_text''': ANY(UpperCamelCase__ )}, {'''generated_text''': ANY(UpperCamelCase__ )}], [{'''generated_text''': ANY(UpperCamelCase__ )}, {'''generated_text''': ANY(UpperCamelCase__ )}], ], ) if text_generator.tokenizer.pad_token is not None: lowerCAmelCase_ = text_generator( ['''This is great !''', '''Something else'''], num_return_sequences=2, batch_size=2, do_sample=UpperCamelCase__ ) self.assertEqual( UpperCamelCase__, [ [{'''generated_text''': ANY(UpperCamelCase__ )}, {'''generated_text''': ANY(UpperCamelCase__ )}], [{'''generated_text''': ANY(UpperCamelCase__ )}, {'''generated_text''': ANY(UpperCamelCase__ )}], ], ) with self.assertRaises(UpperCamelCase__ ): lowerCAmelCase_ = text_generator('''test''', return_full_text=UpperCamelCase__, return_text=UpperCamelCase__ ) with self.assertRaises(UpperCamelCase__ ): lowerCAmelCase_ = text_generator('''test''', return_full_text=UpperCamelCase__, return_tensors=UpperCamelCase__ ) with self.assertRaises(UpperCamelCase__ ): lowerCAmelCase_ = text_generator('''test''', return_text=UpperCamelCase__, return_tensors=UpperCamelCase__ ) # Empty prompt is slighly special # it requires BOS token to exist. # Special case for Pegasus which will always append EOS so will # work even without BOS. 
if ( text_generator.tokenizer.bos_token_id is not None or "Pegasus" in tokenizer.__class__.__name__ or "Git" in model.__class__.__name__ ): lowerCAmelCase_ = text_generator('''''' ) self.assertEqual(UpperCamelCase__, [{'''generated_text''': ANY(UpperCamelCase__ )}] ) else: with self.assertRaises((ValueError, AssertionError) ): lowerCAmelCase_ = text_generator('''''' ) if text_generator.framework == "tf": # TF generation does not support max_new_tokens, and it's impossible # to control long generation with only max_length without # fancy calculation, dismissing tests for now. return # We don't care about infinite range models. # They already work. # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly. lowerCAmelCase_ = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM'''] if ( tokenizer.model_max_length < 1_0000 and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS ): # Handling of large generations with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ): text_generator('''This is a test''' * 500, max_new_tokens=20 ) lowerCAmelCase_ = text_generator('''This is a test''' * 500, handle_long_generation='''hole''', max_new_tokens=20 ) # Hole strategy cannot work with self.assertRaises(UpperCamelCase__ ): text_generator( '''This is a test''' * 500, handle_long_generation='''hole''', max_new_tokens=tokenizer.model_max_length + 10, ) @require_torch @require_accelerate @require_torch_gpu def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" import torch # Classic `model_kwargs` lowerCAmelCase_ = pipeline( model='''hf-internal-testing/tiny-random-bloom''', model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa}, ) self.assertEqual(pipe.model.device, torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloataa ) lowerCAmelCase_ = pipe('''This is a test''' ) self.assertEqual( UpperCamelCase__, [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ], ) # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.) 
lowerCAmelCase_ = pipeline(model='''hf-internal-testing/tiny-random-bloom''', device_map='''auto''', torch_dtype=torch.bfloataa ) self.assertEqual(pipe.model.device, torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloataa ) lowerCAmelCase_ = pipe('''This is a test''' ) self.assertEqual( UpperCamelCase__, [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ], ) # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602 lowerCAmelCase_ = pipeline(model='''hf-internal-testing/tiny-random-bloom''', device_map='''auto''' ) self.assertEqual(pipe.model.device, torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype, torch.floataa ) lowerCAmelCase_ = pipe('''This is a test''' ) self.assertEqual( UpperCamelCase__, [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ], ) @require_torch @require_torch_gpu def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" import torch lowerCAmelCase_ = pipeline(model='''hf-internal-testing/tiny-random-bloom''', device=0, torch_dtype=torch.floataa ) pipe('''This is a test''' ) @require_torch @require_accelerate @require_torch_gpu def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" import torch lowerCAmelCase_ = pipeline(model='''hf-internal-testing/tiny-random-bloom''', device_map='''auto''', torch_dtype=torch.floataa ) pipe('''This is a test''', do_sample=UpperCamelCase__, top_p=0.5 ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = '''Hello world''' lowerCAmelCase_ = pipeline('''text-generation''', model='''hf-internal-testing/tiny-random-gpt2''' ) if text_generator.model.framework == "tf": lowerCAmelCase_ = logging.get_logger('''transformers.generation.tf_utils''' ) else: lowerCAmelCase_ = logging.get_logger('''transformers.generation.utils''' ) lowerCAmelCase_ = '''Both `max_new_tokens`''' # The beggining of the message to be checked in this test # Both are set by the user -> log warning with CaptureLogger(UpperCamelCase__ ) as cl: lowerCAmelCase_ = text_generator(UpperCamelCase__, max_length=10, max_new_tokens=1 ) self.assertIn(UpperCamelCase__, cl.out ) # The user only sets one -> no warning with CaptureLogger(UpperCamelCase__ ) as cl: lowerCAmelCase_ = text_generator(UpperCamelCase__, max_new_tokens=1 ) self.assertNotIn(UpperCamelCase__, cl.out ) with CaptureLogger(UpperCamelCase__ ) as cl: lowerCAmelCase_ = text_generator(UpperCamelCase__, max_length=10 ) self.assertNotIn(UpperCamelCase__, cl.out )
278
def solution(limit: int = 1000000) -> int:
    """Return the start below `limit` whose Collatz chain is longest (Project Euler 14)."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, limit):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter

    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
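# Sanity check for `solution` above on a hand-verifiable bound: below 10, the
# chain starting at 9 (9 -> 28 -> 14 -> ... -> 1, 20 terms) is the longest.
# For the Project Euler bound of one million the answer is 837799.
assert solution(10) == 9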
278
1
from math import log2


def get_index_of_rightmost_set_bit(number: int) -> int:
    """Return the zero-based index of the rightmost set bit of `number` (0 for input 0)."""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    # number & -number isolates the lowest set bit; log2 gives that bit's index
    return 0 if (number == 0) else int(log2(number & -number))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
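# Why `number & -number` works, sketched: in two's complement, negating a value
# flips every bit above its lowest set bit, so the AND isolates exactly that
# bit and log2 of the result is its zero-based index. (The function name above
# is restored from context and may differ from the original.)
for value, expected_index in [(1, 0), (2, 1), (12, 2), (36, 2)]:
    assert get_index_of_rightmost_set_bit(value) == expected_index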
278
import unittest

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device


if is_torch_available():
    import torch

    from transformers import AutoModelForImageClassification

if is_vision_available():
    from transformers import AutoImageProcessor


@require_torch
@require_vision
class A(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
278
1
from __future__ import annotations

from scipy.special import comb  # type: ignore


class BezierCurve:
    """Bezier curve defined by a list of 2D control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        """Return the values of all Bernstein basis polynomials at time t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        """Return the (x, y) point on the curve at time t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."

        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        """Plot the curve in blue and its control points in red with matplotlib."""
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x,
            to_plot_y,
            color="blue",
            label="Curve of Degree " + str(self.degree),
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
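# Worked check for BezierCurve above: a degree-1 curve between (1, 1) and
# (3, 3) is a straight segment, so t = 0.5 must give equal basis weights and
# the midpoint of the segment.
curve = BezierCurve([(1, 1), (3, 3)])
assert list(curve.basis_function(0.5)) == [0.5, 0.5]
assert curve.bezier_curve_function(0.5) == (2.0, 2.0)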
278
import argparse import os import re import torch from flax.traverse_util import flatten_dict from tax import checkpoints from transformers import ( AutoTokenizer, PixaStructConfig, PixaStructForConditionalGeneration, PixaStructImageProcessor, PixaStructProcessor, PixaStructTextConfig, PixaStructVisionConfig, ) def __UpperCamelCase ( _A ): lowerCAmelCase_ = checkpoints.load_tax_checkpoint(_A ) lowerCAmelCase_ = flatten_dict(_A ) return flax_params def __UpperCamelCase ( _A ): lowerCAmelCase_ = {} lowerCAmelCase_ = { '''token_embedder''': '''embeddings''', '''encoder_norm''': '''layernorm''', '''kernel''': '''weight''', '''.out''': '''.output''', '''scale''': '''weight''', '''embedders_0.pos_embedding''': '''row_embedder.weight''', '''embedders_1.pos_embedding''': '''column_embedder.weight''', } lowerCAmelCase_ = { '''query''': '''attention.query''', '''key''': '''attention.key''', '''value''': '''attention.value''', '''output.dense''': '''output''', '''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''', '''pre_self_attention_layer_norm''': '''self_attention.layer_norm''', '''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''', '''mlp.''': '''mlp.DenseReluDense.''', '''pre_mlp_layer_norm''': '''mlp.layer_norm''', '''self_attention.o''': '''self_attention.attention.o''', '''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''', '''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''', '''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''', '''decoder.logits_dense.weight''': '''decoder.lm_head.weight''', } for key in flax_dict.keys(): if "target" in key: # remove the first prefix from the key lowerCAmelCase_ = '''.'''.join(key[1:] ) # rename the key for old, new in CONVERSION_MAPPING.items(): lowerCAmelCase_ = new_key.replace(_A , _A ) if "decoder" in new_key: for old, new in DECODER_CONVERSION_MAPPING.items(): lowerCAmelCase_ = new_key.replace(_A , _A ) if "layers" in new_key and "decoder" not in new_key: # use regex to replace the layer number lowerCAmelCase_ = re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , _A ) lowerCAmelCase_ = new_key.replace('''encoder''' , '''encoder.encoder''' ) elif "layers" in new_key and "decoder" in new_key: # use regex to replace the layer number lowerCAmelCase_ = re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , _A ) lowerCAmelCase_ = flax_dict[key] lowerCAmelCase_ = {} # convert converted_dict into torch format for key in converted_dict.keys(): if ("embed_tokens" not in key) and ("embedder" not in key): lowerCAmelCase_ = torch.from_numpy(converted_dict[key].T ) else: lowerCAmelCase_ = torch.from_numpy(converted_dict[key] ) return converted_torch_dict def __UpperCamelCase ( _A , _A , _A=False , _A=False ): lowerCAmelCase_ = get_flax_param(_A ) if not use_large: lowerCAmelCase_ = PixaStructVisionConfig() lowerCAmelCase_ = PixaStructTextConfig() else: lowerCAmelCase_ = PixaStructVisionConfig( hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 ) lowerCAmelCase_ = PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 ) lowerCAmelCase_ = PixaStructConfig( vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=_A ) lowerCAmelCase_ = PixaStructForConditionalGeneration(_A ) lowerCAmelCase_ = rename_and_convert_flax_params(_A ) model.load_state_dict(_A ) lowerCAmelCase_ = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' ) 
lowerCAmelCase_ = PixaStructImageProcessor() lowerCAmelCase_ = PixaStructProcessor(image_processor=_A , tokenizer=_A ) if use_large: lowerCAmelCase_ = 4096 lowerCAmelCase_ = True # mkdir if needed os.makedirs(_A , exist_ok=_A ) model.save_pretrained(_A ) processor.save_pretrained(_A ) print('''Model saved in {}'''.format(_A ) ) if __name__ == "__main__": _A = argparse.ArgumentParser() parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''') parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''') parser.add_argument('''--is_vqa''', action='''store_true''', help='''Use large model.''') _A = parser.parse_args() convert_pixastruct_original_pytorch_checkpoint_to_hf( args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large )
278
1
import argparse

import pytorch_lightning as pl
import torch
from torch import nn

from transformers import LongformerForQuestionAnswering, LongformerModel


class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires to do so
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
278
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
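# A minimal sketch of what the shim above does at runtime (assumes the
# transformers vision extras are installed; `BeitFeatureExtractor` is the
# deprecated alias defined above): instantiating it emits a FutureWarning and
# otherwise behaves exactly like BeitImageProcessor.
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    feature_extractor = BeitFeatureExtractor()  # works like BeitImageProcessor()
assert any(issubclass(w.category, FutureWarning) for w in caught)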
278
1
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Return the Hubble parameter H(z) of an FLRW universe, in the units of hubble_constant."""
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3

    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
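# Consistency check for hubble_parameter above: in a flat, matter-only
# universe (matter_density = 1, everything else 0) the expression inside the
# square root collapses to (1 + z)**3, so H(z) = H0 * (1 + z)**1.5.
h = hubble_parameter(
    hubble_constant=68.3,
    radiation_density=0,
    matter_density=1,
    dark_energy=0,
    redshift=1,
)
assert abs(h - 68.3 * 2**1.5) < 1e-9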
278
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() _A = logging.get_logger(__name__) def __UpperCamelCase ( _A ): lowerCAmelCase_ = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: lowerCAmelCase_ = 192 lowerCAmelCase_ = 768 lowerCAmelCase_ = 12 lowerCAmelCase_ = 3 lowerCAmelCase_ = [800, 1333] lowerCAmelCase_ = False elif yolos_name == "yolos_s_dWr": lowerCAmelCase_ = 330 lowerCAmelCase_ = 14 lowerCAmelCase_ = 6 lowerCAmelCase_ = 1320 elif "yolos_s" in yolos_name: lowerCAmelCase_ = 384 lowerCAmelCase_ = 1536 lowerCAmelCase_ = 12 lowerCAmelCase_ = 6 elif "yolos_b" in yolos_name: lowerCAmelCase_ = [800, 1344] lowerCAmelCase_ = 91 lowerCAmelCase_ = '''huggingface/label-files''' lowerCAmelCase_ = '''coco-detection-id2label.json''' lowerCAmelCase_ = json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) ) lowerCAmelCase_ = {int(_A ): v for k, v in idalabel.items()} lowerCAmelCase_ = idalabel lowerCAmelCase_ = {v: k for k, v in idalabel.items()} return config def __UpperCamelCase ( _A , _A , _A = False ): for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCAmelCase_ = state_dict.pop(f"blocks.{i}.attn.qkv.weight" ) lowerCAmelCase_ = state_dict.pop(f"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict lowerCAmelCase_ = in_proj_weight[: config.hidden_size, :] lowerCAmelCase_ = in_proj_bias[: config.hidden_size] lowerCAmelCase_ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCAmelCase_ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCAmelCase_ = in_proj_weight[-config.hidden_size :, :] lowerCAmelCase_ = in_proj_bias[-config.hidden_size :] def __UpperCamelCase ( _A ): if "backbone" in name: lowerCAmelCase_ = name.replace('''backbone''' , '''vit''' ) if "cls_token" in name: lowerCAmelCase_ = name.replace('''cls_token''' , '''embeddings.cls_token''' ) if "det_token" in name: lowerCAmelCase_ = name.replace('''det_token''' , '''embeddings.detection_tokens''' ) if "mid_pos_embed" in name: lowerCAmelCase_ = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' ) if "pos_embed" in name: lowerCAmelCase_ = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' ) if "patch_embed.proj" in name: lowerCAmelCase_ = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "blocks" in name: lowerCAmelCase_ = name.replace('''blocks''' , '''encoder.layer''' ) if "attn.proj" in name: lowerCAmelCase_ = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: lowerCAmelCase_ = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: lowerCAmelCase_ = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: lowerCAmelCase_ = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: lowerCAmelCase_ = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: lowerCAmelCase_ = name.replace('''mlp.fc2''' , '''output.dense''' ) if "class_embed" in name: lowerCAmelCase_ = name.replace('''class_embed''' , '''class_labels_classifier''' ) if "bbox_embed" in name: lowerCAmelCase_ = name.replace('''bbox_embed''' , 
'''bbox_predictor''' ) if "vit.norm" in name: lowerCAmelCase_ = name.replace('''vit.norm''' , '''vit.layernorm''' ) return name def __UpperCamelCase ( _A , _A ): for key in orig_state_dict.copy().keys(): lowerCAmelCase_ = orig_state_dict.pop(_A ) if "qkv" in key: lowerCAmelCase_ = key.split('''.''' ) lowerCAmelCase_ = int(key_split[2] ) lowerCAmelCase_ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: lowerCAmelCase_ = val[:dim, :] lowerCAmelCase_ = val[ dim : dim * 2, : ] lowerCAmelCase_ = val[-dim:, :] else: lowerCAmelCase_ = val[:dim] lowerCAmelCase_ = val[dim : dim * 2] lowerCAmelCase_ = val[-dim:] else: lowerCAmelCase_ = val return orig_state_dict def __UpperCamelCase ( ): lowerCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCAmelCase_ = Image.open(requests.get(_A , stream=_A ).raw ) return im @torch.no_grad() def __UpperCamelCase ( _A , _A , _A , _A = False ): lowerCAmelCase_ = get_yolos_config(_A ) # load original state_dict lowerCAmelCase_ = torch.load(_A , map_location='''cpu''' )['''model'''] # load 🤗 model lowerCAmelCase_ = YolosForObjectDetection(_A ) model.eval() lowerCAmelCase_ = convert_state_dict(_A , _A ) model.load_state_dict(_A ) # Check outputs on an image, prepared by YolosImageProcessor lowerCAmelCase_ = 800 if yolos_name != '''yolos_ti''' else 512 lowerCAmelCase_ = YolosImageProcessor(format='''coco_detection''' , size=_A ) lowerCAmelCase_ = image_processor(images=prepare_img() , return_tensors='''pt''' ) lowerCAmelCase_ = model(**_A ) lowerCAmelCase_ , lowerCAmelCase_ = outputs.logits, outputs.pred_boxes lowerCAmelCase_ , lowerCAmelCase_ = None, None if yolos_name == "yolos_ti": lowerCAmelCase_ = torch.tensor( [[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] ) lowerCAmelCase_ = torch.tensor( [[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] ) elif yolos_name == "yolos_s_200_pre": lowerCAmelCase_ = torch.tensor( [[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] ) lowerCAmelCase_ = torch.tensor( [[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] ) elif yolos_name == "yolos_s_300_pre": lowerCAmelCase_ = torch.tensor( [[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] ) lowerCAmelCase_ = torch.tensor( [[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] ) elif yolos_name == "yolos_s_dWr": lowerCAmelCase_ = torch.tensor( [[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] ) lowerCAmelCase_ = torch.tensor( [[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] ) elif yolos_name == "yolos_base": lowerCAmelCase_ = torch.tensor( [[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] ) lowerCAmelCase_ = torch.tensor( [[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] ) else: raise ValueError(f"Unknown yolos_name: {yolos_name}" ) assert torch.allclose(logits[0, :3, :3] , _A , atol=1E-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , _A , 
atol=1E-4 ) Path(_A ).mkdir(exist_ok=_A ) print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_A ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(_A ) if push_to_hub: lowerCAmelCase_ = { '''yolos_ti''': '''yolos-tiny''', '''yolos_s_200_pre''': '''yolos-small''', '''yolos_s_300_pre''': '''yolos-small-300''', '''yolos_s_dWr''': '''yolos-small-dwr''', '''yolos_base''': '''yolos-base''', } print('''Pushing to the hub...''' ) lowerCAmelCase_ = model_mapping[yolos_name] image_processor.push_to_hub(_A , organization='''hustvl''' ) model.push_to_hub(_A , organization='''hustvl''' ) if __name__ == "__main__": _A = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--yolos_name''', default='''yolos_s_200_pre''', type=str, help=( '''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\',''' ''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.''' ), ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) _A = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
278
1
from ..utils import DummyObject, requires_backends class A ( metaclass=__UpperCAmelCase ): __snake_case = ['torch', 'transformers', 'onnx'] def __init__( self, *UpperCamelCase__, **UpperCamelCase__ ): """simple docstring""" requires_backends(self, ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls, *UpperCamelCase__, **UpperCamelCase__ ): """simple docstring""" requires_backends(cls, ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls, *UpperCamelCase__, **UpperCamelCase__ ): """simple docstring""" requires_backends(cls, ['''torch''', '''transformers''', '''onnx'''] ) class A ( metaclass=__UpperCAmelCase ): __snake_case = ['torch', 'transformers', 'onnx'] def __init__( self, *UpperCamelCase__, **UpperCamelCase__ ): """simple docstring""" requires_backends(self, ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls, *UpperCamelCase__, **UpperCamelCase__ ): """simple docstring""" requires_backends(cls, ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls, *UpperCamelCase__, **UpperCamelCase__ ): """simple docstring""" requires_backends(cls, ['''torch''', '''transformers''', '''onnx'''] ) class A ( metaclass=__UpperCAmelCase ): __snake_case = ['torch', 'transformers', 'onnx'] def __init__( self, *UpperCamelCase__, **UpperCamelCase__ ): """simple docstring""" requires_backends(self, ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls, *UpperCamelCase__, **UpperCamelCase__ ): """simple docstring""" requires_backends(cls, ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls, *UpperCamelCase__, **UpperCamelCase__ ): """simple docstring""" requires_backends(cls, ['''torch''', '''transformers''', '''onnx'''] ) class A ( metaclass=__UpperCAmelCase ): __snake_case = ['torch', 'transformers', 'onnx'] def __init__( self, *UpperCamelCase__, **UpperCamelCase__ ): """simple docstring""" requires_backends(self, ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls, *UpperCamelCase__, **UpperCamelCase__ ): """simple docstring""" requires_backends(cls, ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls, *UpperCamelCase__, **UpperCamelCase__ ): """simple docstring""" requires_backends(cls, ['''torch''', '''transformers''', '''onnx'''] ) class A ( metaclass=__UpperCAmelCase ): __snake_case = ['torch', 'transformers', 'onnx'] def __init__( self, *UpperCamelCase__, **UpperCamelCase__ ): """simple docstring""" requires_backends(self, ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls, *UpperCamelCase__, **UpperCamelCase__ ): """simple docstring""" requires_backends(cls, ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls, *UpperCamelCase__, **UpperCamelCase__ ): """simple docstring""" requires_backends(cls, ['''torch''', '''transformers''', '''onnx'''] ) class A ( metaclass=__UpperCAmelCase ): __snake_case = ['torch', 'transformers', 'onnx'] def __init__( self, *UpperCamelCase__, **UpperCamelCase__ ): """simple docstring""" requires_backends(self, ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls, *UpperCamelCase__, **UpperCamelCase__ ): """simple docstring""" requires_backends(cls, ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def 
SCREAMING_SNAKE_CASE__ ( cls, *UpperCamelCase__, **UpperCamelCase__ ): """simple docstring""" requires_backends(cls, ['''torch''', '''transformers''', '''onnx'''] )
278
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product of any contiguous subarray of `numbers`."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
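# Usage sketch for max_product_subarray above; tracking the running minimum is
# what lets a later negative number flip it back into the maximum, so the
# trailing -1 recovers the full product 48 in the second case.
assert max_product_subarray([2, 3, -2, 4]) == 6
assert max_product_subarray([2, 3, -2, 4, -1]) == 48
assert max_product_subarray([]) == 0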
278
1
import argparse

import torch

from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--albert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained ALBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
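# A minimal sketch of driving the converter above from Python instead of the
# CLI; all three paths are placeholders, not real files.
convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="/path/to/model.ckpt-best",      # hypothetical TF checkpoint
    albert_config_file="/path/to/albert_config.json",   # hypothetical config file
    pytorch_dump_path="/path/to/pytorch_model.bin",     # hypothetical output file
)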
278
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def __UpperCamelCase ( _A ): lowerCAmelCase_ = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2] lowerCAmelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False lowerCAmelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False lowerCAmelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: lowerCAmelCase_ = [3, 3, 3, 3] lowerCAmelCase_ = [5, 5, 5, 5] elif "fl4" in model_name: lowerCAmelCase_ = [4, 4, 4, 4] lowerCAmelCase_ = [3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: lowerCAmelCase_ = [3, 3, 3, 3] if "lrf" in model_name: lowerCAmelCase_ = [3, 3, 3, 3] else: lowerCAmelCase_ = [2, 2, 2, 2] if "tiny" in model_name: lowerCAmelCase_ = 96 elif "small" in model_name: lowerCAmelCase_ = 96 elif "base" in model_name: lowerCAmelCase_ = 128 elif "large" in model_name: lowerCAmelCase_ = 192 elif "xlarge" in model_name: lowerCAmelCase_ = 256 elif "huge" in model_name: lowerCAmelCase_ = 352 # set label information lowerCAmelCase_ = '''huggingface/label-files''' if "large" in model_name or "huge" in model_name: lowerCAmelCase_ = '''imagenet-22k-id2label.json''' else: lowerCAmelCase_ = '''imagenet-1k-id2label.json''' lowerCAmelCase_ = json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) ) lowerCAmelCase_ = {int(_A ): v for k, v in idalabel.items()} lowerCAmelCase_ = {v: k for k, v in idalabel.items()} lowerCAmelCase_ = FocalNetConfig( embed_dim=_A , depths=_A , focal_levels=_A , focal_windows=_A , use_conv_embed=_A , idalabel=_A , labelaid=_A , use_post_layernorm=_A , use_layerscale=_A , ) return config def __UpperCamelCase ( _A ): if "patch_embed.proj" in name: lowerCAmelCase_ = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: lowerCAmelCase_ = name.replace('''patch_embed.norm''' , '''embeddings.norm''' ) if "layers" in name: lowerCAmelCase_ = '''encoder.''' + name if "encoder.layers" in name: lowerCAmelCase_ = name.replace('''encoder.layers''' , '''encoder.stages''' ) if "downsample.proj" in name: lowerCAmelCase_ = name.replace('''downsample.proj''' , '''downsample.projection''' ) if "blocks" in name: lowerCAmelCase_ = name.replace('''blocks''' , '''layers''' ) if "modulation.f.weight" in name or "modulation.f.bias" in name: lowerCAmelCase_ = name.replace('''modulation.f''' , '''modulation.projection_in''' ) if "modulation.h.weight" in name or "modulation.h.bias" in name: lowerCAmelCase_ = name.replace('''modulation.h''' , '''modulation.projection_context''' ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: lowerCAmelCase_ = name.replace('''modulation.proj''' , '''modulation.projection_out''' ) if name == "norm.weight": lowerCAmelCase_ = '''layernorm.weight''' if name == "norm.bias": lowerCAmelCase_ = '''layernorm.bias''' if "head" in name: lowerCAmelCase_ = name.replace('''head''' , '''classifier''' ) else: lowerCAmelCase_ = '''focalnet.''' + name return name def __UpperCamelCase ( _A , _A , _A=False ): # fmt: off 
lowerCAmelCase_ = { '''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''', '''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''', '''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''', '''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''', '''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''', '''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''', '''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''', '''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''', '''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''', '''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''', } # fmt: on lowerCAmelCase_ = model_name_to_url[model_name] print('''Checkpoint URL: ''' , _A ) lowerCAmelCase_ = torch.hub.load_state_dict_from_url(_A , map_location='''cpu''' )['''model'''] # rename keys for key in state_dict.copy().keys(): lowerCAmelCase_ = state_dict.pop(_A ) lowerCAmelCase_ = val lowerCAmelCase_ = get_focalnet_config(_A ) lowerCAmelCase_ = FocalNetForImageClassification(_A ) model.eval() # load state dict model.load_state_dict(_A ) # verify conversion lowerCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCAmelCase_ = BitImageProcessor( do_resize=_A , size={'''shortest_edge''': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=_A , crop_size=224 , do_normalize=_A , image_mean=_A , image_std=_A , ) lowerCAmelCase_ = Image.open(requests.get(_A , stream=_A ).raw ) lowerCAmelCase_ = processor(images=_A , return_tensors='''pt''' ) lowerCAmelCase_ = transforms.Compose( [ transforms.Resize(256 ), transforms.CenterCrop(224 ), transforms.ToTensor(), transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ), ] ) lowerCAmelCase_ = image_transforms(_A ).unsqueeze(0 ) # verify pixel_values assert torch.allclose(inputs.pixel_values , _A , atol=1E-4 ) lowerCAmelCase_ = model(**_A ) lowerCAmelCase_ = outputs.logits.argmax(-1 ).item() print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] ) print('''First values of logits:''' , outputs.logits[0, :3] ) if model_name == "focalnet-tiny": lowerCAmelCase_ = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ) elif model_name == "focalnet-tiny-lrf": lowerCAmelCase_ = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] ) elif model_name == "focalnet-small": lowerCAmelCase_ = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] ) elif model_name == "focalnet-small-lrf": lowerCAmelCase_ = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] ) elif model_name == "focalnet-base": lowerCAmelCase_ = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] ) elif model_name == "focalnet-base-lrf": lowerCAmelCase_ = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] ) assert torch.allclose(outputs.logits[0, :3] , _A , atol=1E-4 ) print('''Looks ok!''' ) if 
pytorch_dump_folder_path is not None: print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_A ) processor.save_pretrained(_A ) if push_to_hub: print(f"Pushing model and processor of {model_name} to the hub..." ) model.push_to_hub(f"{model_name}" ) processor.push_to_hub(f"{model_name}" ) if __name__ == "__main__": _A = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''focalnet-tiny''', type=str, help='''Name of the FocalNet model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub.''', ) _A = parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
278
1
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """Remove duplicate entries from the model doc toctree and sort models alphabetically."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]

    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
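# Toy illustration of clean_model_doc_toc above (made-up data, not a real
# toctree): the duplicated BERT entry collapses to a single one, and the
# result comes back sorted case-insensitively by title.
toy_model_doc = [
    {"local": "model_doc/bert", "title": "BERT"},
    {"local": "model_doc/albert", "title": "ALBERT"},
    {"local": "model_doc/bert", "title": "BERT"},
]
assert clean_model_doc_toc(toy_model_doc) == [
    {"local": "model_doc/albert", "title": "ALBERT"},
    {"local": "model_doc/bert", "title": "BERT"},
]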
278
import argparse from argparse import Namespace import torch from torch import nn from transformers import XGLMConfig, XGLMForCausalLM def __UpperCamelCase ( _A ): lowerCAmelCase_ = [ '''decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(_A , _A ) def __UpperCamelCase ( _A ): lowerCAmelCase_ , lowerCAmelCase_ = emb.weight.shape lowerCAmelCase_ = nn.Linear(_A , _A , bias=_A ) lowerCAmelCase_ = emb.weight.data return lin_layer def __UpperCamelCase ( _A ): lowerCAmelCase_ = torch.load(_A , map_location='''cpu''' ) lowerCAmelCase_ = Namespace(**checkpoint['''cfg''']['''model'''] ) lowerCAmelCase_ = checkpoint['''model'''] remove_ignore_keys_(_A ) lowerCAmelCase_ = state_dict['''decoder.embed_tokens.weight'''].shape[0] lowerCAmelCase_ = {key.replace('''decoder''' , '''model''' ): val for key, val in state_dict.items()} lowerCAmelCase_ = XGLMConfig( vocab_size=_A , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''gelu''' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , ) lowerCAmelCase_ = XGLMForCausalLM(_A ) lowerCAmelCase_ = model.load_state_dict(_A , strict=_A ) print(_A ) lowerCAmelCase_ = make_linear_from_emb(model.model.embed_tokens ) return model if __name__ == "__main__": _A = argparse.ArgumentParser() # Required parameters parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''') parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') _A = parser.parse_args() _A = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
278
1
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _A = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A = [ '''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MraForMaskedLM''', '''MraForMultipleChoice''', '''MraForQuestionAnswering''', '''MraForSequenceClassification''', '''MraForTokenClassification''', '''MraLayer''', '''MraModel''', '''MraPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mra import ( MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraLayer, MraModel, MraPreTrainedModel, ) else: import sys _A = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
278
# This script creates a super tiny model that is useful inside tests, when we just want to test that # the machinery works, without needing to the check the quality of the outcomes. # # This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny - # all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and # emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files. # The latter is done by `fsmt-make-super-tiny-model.py`. # # It will be used then as "stas/tiny-wmt19-en-ru" from pathlib import Path import json import tempfile from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES _A = '''tiny-wmt19-en-ru''' # Build # borrowed from a test _A = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''w</w>''', '''r</w>''', '''t</w>''', '''lo''', '''low''', '''er</w>''', '''low</w>''', '''lowest</w>''', '''newer</w>''', '''wider</w>''', '''<unk>''', ] _A = dict(zip(vocab, range(len(vocab)))) _A = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', ''''''] with tempfile.TemporaryDirectory() as tmpdirname: _A = Path(tmpdirname) _A = build_dir / VOCAB_FILES_NAMES['''src_vocab_file'''] _A = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file'''] _A = build_dir / VOCAB_FILES_NAMES['''merges_file'''] with open(src_vocab_file, '''w''') as fp: fp.write(json.dumps(vocab_tokens)) with open(tgt_vocab_file, '''w''') as fp: fp.write(json.dumps(vocab_tokens)) with open(merges_file, '''w''') as fp: fp.write('''\n'''.join(merges)) _A = FSMTTokenizer( langs=['''en''', '''ru'''], src_vocab_size=len(vocab), tgt_vocab_size=len(vocab), src_vocab_file=src_vocab_file, tgt_vocab_file=tgt_vocab_file, merges_file=merges_file, ) _A = FSMTConfig( langs=['''ru''', '''en'''], src_vocab_size=1_000, tgt_vocab_size=1_000, d_model=4, encoder_layers=1, decoder_layers=1, encoder_ffn_dim=4, decoder_ffn_dim=4, encoder_attention_heads=1, decoder_attention_heads=1, ) _A = FSMTForConditionalGeneration(config) print(f"num of params {tiny_model.num_parameters()}") # Test _A = tokenizer(['''Making tiny model'''], return_tensors='''pt''') _A = tiny_model(**batch) print('''test output:''', len(outputs.logits[0])) # Save tiny_model.half() # makes it smaller tiny_model.save_pretrained(mname_tiny) tokenizer.save_pretrained(mname_tiny) print(f"Generated {mname_tiny}") # Upload # transformers-cli upload tiny-wmt19-en-ru
278
1
import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def __UpperCamelCase ( _A , _A ): lowerCAmelCase_ = args.log_outputs lowerCAmelCase_ = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] ) # load metric lowerCAmelCase_ = load_metric('''wer''' ) lowerCAmelCase_ = load_metric('''cer''' ) # compute metrics lowerCAmelCase_ = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] ) lowerCAmelCase_ = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] ) # print & log results lowerCAmelCase_ = f"WER: {wer_result}\nCER: {cer_result}" print(_A ) with open(f"{dataset_id}_eval_results.txt" , '''w''' ) as f: f.write(_A ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: lowerCAmelCase_ = f"log_{dataset_id}_predictions.txt" lowerCAmelCase_ = f"log_{dataset_id}_targets.txt" with open(_A , '''w''' ) as p, open(_A , '''w''' ) as t: # mapping function to write output def write_to_file(_A , _A ): p.write(f"{i}" + '''\n''' ) p.write(batch['''prediction'''] + '''\n''' ) t.write(f"{i}" + '''\n''' ) t.write(batch['''target'''] + '''\n''' ) result.map(_A , with_indices=_A ) def __UpperCamelCase ( _A ): lowerCAmelCase_ = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training lowerCAmelCase_ = re.sub(_A , '''''' , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! lowerCAmelCase_ = ['''\n\n''', '''\n''', ''' ''', ''' '''] for t in token_sequences_to_ignore: lowerCAmelCase_ = ''' '''.join(text.split(_A ) ) return text def __UpperCamelCase ( _A ): # load dataset lowerCAmelCase_ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=_A ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained(args.model_id ) lowerCAmelCase_ = feature_extractor.sampling_rate # resample audio lowerCAmelCase_ = dataset.cast_column('''audio''' , Audio(sampling_rate=_A ) ) # load eval pipeline if args.device is None: lowerCAmelCase_ = 0 if torch.cuda.is_available() else -1 lowerCAmelCase_ = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(_A ): lowerCAmelCase_ = asr( batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) lowerCAmelCase_ = prediction['''text'''] lowerCAmelCase_ = normalize_text(batch['''sentence'''] ) return batch # run inference on all examples lowerCAmelCase_ = dataset.map(_A , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(_A , _A ) if __name__ == "__main__": _A = argparse.ArgumentParser() parser.add_argument( '''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers''' ) parser.add_argument( '''--dataset''', type=str, required=True, help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''', ) parser.add_argument( '''--config''', type=str, required=True, help='''Config of the dataset. 
*E.g.* `\'en\'` for Common Voice''' ) parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''') parser.add_argument( '''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.''' ) parser.add_argument( '''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.''' ) parser.add_argument( '''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.''' ) parser.add_argument( '''--device''', type=int, default=None, help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''', ) _A = parser.parse_args() main(args)
278
import argparse from collections import defaultdict import yaml _A = '''docs/source/en/_toctree.yml''' def __UpperCamelCase ( _A ): lowerCAmelCase_ = defaultdict(_A ) for doc in model_doc: counts[doc["local"]] += 1 lowerCAmelCase_ = [key for key, value in counts.items() if value > 1] lowerCAmelCase_ = [] for duplicate_key in duplicates: lowerCAmelCase_ = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} ) if len(_A ) > 1: raise ValueError( f"{duplicate_key} is present several times in the documentation table of content at " '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the ''' '''others.''' ) # Only add this once new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] ) # Sort return sorted(_A , key=lambda _A : s["title"].lower() ) def __UpperCamelCase ( _A=False ): with open(_A , encoding='''utf-8''' ) as f: lowerCAmelCase_ = yaml.safe_load(f.read() ) # Get to the API doc lowerCAmelCase_ = 0 while content[api_idx]["title"] != "API": api_idx += 1 lowerCAmelCase_ = content[api_idx]['''sections'''] # Then to the model doc lowerCAmelCase_ = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 lowerCAmelCase_ = api_doc[model_idx]['''sections'''] lowerCAmelCase_ = [(idx, section) for idx, section in enumerate(_A ) if '''sections''' in section] lowerCAmelCase_ = False for idx, modality_doc in modalities_docs: lowerCAmelCase_ = modality_doc['''sections'''] lowerCAmelCase_ = clean_model_doc_toc(_A ) if old_modality_doc != new_modality_doc: lowerCAmelCase_ = True if overwrite: lowerCAmelCase_ = new_modality_doc if diff: if overwrite: lowerCAmelCase_ = model_doc lowerCAmelCase_ = api_doc with open(_A , '''w''' , encoding='''utf-8''' ) as f: f.write(yaml.dump(_A , allow_unicode=_A ) ) else: raise ValueError( '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' ) if __name__ == "__main__": _A = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') _A = parser.parse_args() check_model_doc(args.fix_and_overwrite)
278
1
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
278
import torch

from diffusers import UnCLIPScheduler

from .test_schedulers import SchedulerCommonTest


class A ( __UpperCAmelCase ):
    __snake_case = (UnCLIPScheduler,)

    def SCREAMING_SNAKE_CASE__ ( self, **UpperCamelCase__ ):
        """simple docstring"""
        lowerCAmelCase_ = {
            '''num_train_timesteps''': 1000,
            '''variance_type''': '''fixed_small_log''',
            '''clip_sample''': True,
            '''clip_sample_range''': 1.0,
            '''prediction_type''': '''epsilon''',
        }

        config.update(**UpperCamelCase__ )
        return config

    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=UpperCamelCase__, prev_timestep=UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        lowerCAmelCase_ = self.scheduler_classes[0]
        lowerCAmelCase_ = self.get_scheduler_config(variance_type='''fixed_small_log''' )
        lowerCAmelCase_ = scheduler_class(**UpperCamelCase__ )

        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1E-5

    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        lowerCAmelCase_ = self.scheduler_classes[0]
        lowerCAmelCase_ = self.get_scheduler_config(variance_type='''learned_range''' )
        lowerCAmelCase_ = scheduler_class(**UpperCamelCase__ )

        lowerCAmelCase_ = 0.5

        assert scheduler._get_variance(1, predicted_variance=UpperCamelCase__ ) - -10.1_712_790 < 1E-5
        assert scheduler._get_variance(487, predicted_variance=UpperCamelCase__ ) - -5.7_998_052 < 1E-5
        assert scheduler._get_variance(999, predicted_variance=UpperCamelCase__ ) - -0.0_010_011 < 1E-5

    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        lowerCAmelCase_ = self.scheduler_classes[0]
        lowerCAmelCase_ = self.get_scheduler_config()
        lowerCAmelCase_ = scheduler_class(**UpperCamelCase__ )

        lowerCAmelCase_ = scheduler.timesteps

        lowerCAmelCase_ = self.dummy_model()
        lowerCAmelCase_ = self.dummy_sample_deter
        lowerCAmelCase_ = torch.manual_seed(0 )

        for i, t in enumerate(UpperCamelCase__ ):
            # 1. predict noise residual
            lowerCAmelCase_ = model(UpperCamelCase__, UpperCamelCase__ )

            # 2. predict previous mean of sample x_t-1
            lowerCAmelCase_ = scheduler.step(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, generator=UpperCamelCase__ ).prev_sample

            lowerCAmelCase_ = pred_prev_sample

        lowerCAmelCase_ = torch.sum(torch.abs(UpperCamelCase__ ) )
        lowerCAmelCase_ = torch.mean(torch.abs(UpperCamelCase__ ) )

        assert abs(result_sum.item() - 252.2_682_495 ) < 1E-2
        assert abs(result_mean.item() - 0.3_284_743 ) < 1E-3

    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        lowerCAmelCase_ = self.scheduler_classes[0]
        lowerCAmelCase_ = self.get_scheduler_config()
        lowerCAmelCase_ = scheduler_class(**UpperCamelCase__ )
        scheduler.set_timesteps(25 )

        lowerCAmelCase_ = scheduler.timesteps

        lowerCAmelCase_ = self.dummy_model()
        lowerCAmelCase_ = self.dummy_sample_deter
        lowerCAmelCase_ = torch.manual_seed(0 )

        for i, t in enumerate(UpperCamelCase__ ):
            # 1. predict noise residual
            lowerCAmelCase_ = model(UpperCamelCase__, UpperCamelCase__ )

            if i + 1 == timesteps.shape[0]:
                lowerCAmelCase_ = None
            else:
                lowerCAmelCase_ = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            lowerCAmelCase_ = scheduler.step(
                UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, prev_timestep=UpperCamelCase__, generator=UpperCamelCase__
            ).prev_sample

            lowerCAmelCase_ = pred_prev_sample

        lowerCAmelCase_ = torch.sum(torch.abs(UpperCamelCase__ ) )
        lowerCAmelCase_ = torch.mean(torch.abs(UpperCamelCase__ ) )

        assert abs(result_sum.item() - 258.2_044_983 ) < 1E-2
        assert abs(result_mean.item() - 0.3_362_038 ) < 1E-3

    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        pass

    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        pass
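# A hedged sketch (not part of the test file above) instantiating the scheduler
# under test with each variance type the tests cover:
from diffusers import UnCLIPScheduler

for variance_type in ("fixed_small_log", "learned_range"):
    scheduler = UnCLIPScheduler(variance_type=variance_type, num_train_timesteps=1000)
    scheduler.set_timesteps(25)
    print(variance_type, len(scheduler.timesteps))  # 25 inference timesteps each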
import argparse
import math
import os

import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel


def __UpperCamelCase ( ):
    lowerCAmelCase_ = argparse.ArgumentParser()
    parser.add_argument(
        '''-m''',
        '''--pretrained_model_name_or_path''',
        type=_A,
        default=_A,
        required=_A,
        help='''Path to pretrained model or model identifier from huggingface.co/models.''',
    )
    parser.add_argument(
        '''-c''',
        '''--caption''',
        type=_A,
        default='''robotic cat with wings''',
        help='''Text used to generate images.''',
    )
    parser.add_argument(
        '''-n''',
        '''--images_num''',
        type=_A,
        default=4,
        help='''How many images to generate.''',
    )
    parser.add_argument(
        '''-s''',
        '''--seed''',
        type=_A,
        default=42,
        help='''Seed for random process.''',
    )
    parser.add_argument(
        '''-ci''',
        '''--cuda_id''',
        type=_A,
        default=0,
        help='''cuda_id.''',
    )
    lowerCAmelCase_ = parser.parse_args()
    return args


def __UpperCamelCase ( _A, _A, _A ):
    if not len(_A ) == rows * cols:
        raise ValueError('''The specified number of rows and columns are not correct.''' )

    lowerCAmelCase_ , lowerCAmelCase_ = imgs[0].size
    lowerCAmelCase_ = Image.new('''RGB''', size=(cols * w, rows * h) )
    lowerCAmelCase_ , lowerCAmelCase_ = grid.size

    for i, img in enumerate(_A ):
        grid.paste(_A, box=(i % cols * w, i // cols * h) )
    return grid


def __UpperCamelCase ( _A, _A='''robotic cat with wings''', _A=7.5, _A=50, _A=1, _A=42, ):
    lowerCAmelCase_ = torch.Generator(pipeline.device ).manual_seed(_A )
    lowerCAmelCase_ = pipeline(
        _A, guidance_scale=_A, num_inference_steps=_A, generator=_A, num_images_per_prompt=_A, ).images
    lowerCAmelCase_ = int(math.sqrt(_A ) )
    lowerCAmelCase_ = image_grid(_A, rows=_rows, cols=num_images_per_prompt // _rows )
    return grid, images


_A = parse_args()
# Load models and create wrapper for stable diffusion
_A = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''')
_A = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''')
_A = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''')
_A = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''')
_A = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)

_A = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')):
    _A = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, '''unet''', unet)
else:
    _A = unet.to(torch.device('''cuda''', args.cuda_id))

_A = pipeline.to(unet.device)
_A , _A = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split()))))
_A = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SwiftFormerConfig,
    SwiftFormerForImageClassification,
    ViTImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
_A = logging.get_logger(__name__)

_A = torch.device('''cpu''')


def __UpperCamelCase ( ):
    lowerCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    lowerCAmelCase_ = Image.open(requests.get(_A, stream=_A ).raw )
    return im


def __UpperCamelCase ( _A ):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1_7_0_3E0_0, 2.1_1_0_7E0_0, -2.0_8_1_1E0_0, 8.8_6_8_5E-0_1, 2.4_3_6_0E-0_1] )
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9_6_3_6E-0_1, 2.3_4_7_8E-0_1, -1.6_9_6_3E0_0, -1.7_3_8_1E0_0, -8.6_3_3_7E-0_1] )
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2_7_6_8E-0_1, -4.7_4_2_9E-0_1, -1.0_8_9_7E0_0, -1.0_2_4_8E0_0, 3.5_5_2_3E-0_2] )
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5_3_3_0E-0_1, 2.4_2_1_1E-0_1, -6.0_1_8_5E-0_1, -8.2_7_8_9E-0_1, -6.0_4_4_6E-0_2] )


def __UpperCamelCase ( _A, _A, _A ):
    lowerCAmelCase_ = dct.pop(_A )
    lowerCAmelCase_ = val


def __UpperCamelCase ( _A ):
    lowerCAmelCase_ = []
    for k in state_dict.keys():
        lowerCAmelCase_ = k
        if ".pwconv" in k:
            lowerCAmelCase_ = k_new.replace('''.pwconv''', '''.point_wise_conv''' )
        if ".dwconv" in k:
            lowerCAmelCase_ = k_new.replace('''.dwconv''', '''.depth_wise_conv''' )
        if ".Proj." in k:
            lowerCAmelCase_ = k_new.replace('''.Proj.''', '''.proj.''' )
        if "patch_embed" in k_new:
            lowerCAmelCase_ = k_new.replace('''patch_embed''', '''swiftformer.patch_embed.patch_embedding''' )
        if "network" in k_new:
            lowerCAmelCase_ = k_new.split('''.''' )
            if ls[2].isdigit():
                lowerCAmelCase_ = '''swiftformer.encoder.network.''' + ls[1] + '''.blocks.''' + ls[2] + '''.''' + '''.'''.join(ls[3:] )
            else:
                lowerCAmelCase_ = k_new.replace('''network''', '''swiftformer.encoder.network''' )
        rename_keys.append((k, k_new) )
    return rename_keys


@torch.no_grad()
def __UpperCamelCase ( _A, _A, _A ):
    lowerCAmelCase_ = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    lowerCAmelCase_ = 1000
    lowerCAmelCase_ = '''huggingface/label-files'''
    lowerCAmelCase_ = '''imagenet-1k-id2label.json'''
    lowerCAmelCase_ = json.load(open(hf_hub_download(_A, _A, repo_type='''dataset''' ), '''r''' ) )
    lowerCAmelCase_ = {int(_A ): v for k, v in idalabel.items()}
    lowerCAmelCase_ = idalabel
    lowerCAmelCase_ = {v: k for k, v in idalabel.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        lowerCAmelCase_ = [3, 3, 6, 4]
        lowerCAmelCase_ = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        lowerCAmelCase_ = [3, 3, 9, 6]
        lowerCAmelCase_ = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        lowerCAmelCase_ = [4, 3, 10, 5]
        lowerCAmelCase_ = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        lowerCAmelCase_ = [4, 4, 12, 6]
        lowerCAmelCase_ = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith('''https''' ):
            lowerCAmelCase_ = torch.hub.load_state_dict_from_url(_A, map_location='''cpu''', check_hash=_A )
        else:
            lowerCAmelCase_ = torch.load(_A, map_location='''cpu''' )
    lowerCAmelCase_ = checkpoint

    lowerCAmelCase_ = create_rename_keys(_A )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(_A, _A, _A )

    # load HuggingFace model
    lowerCAmelCase_ = SwiftFormerForImageClassification(_A ).eval()
    hf_model.load_state_dict(_A )

    # prepare test inputs
    lowerCAmelCase_ = prepare_img()
    lowerCAmelCase_ = ViTImageProcessor.from_pretrained('''preprocessor_config''' )
    lowerCAmelCase_ = processor(images=_A, return_tensors='''pt''' )

    # compare outputs from both models
    lowerCAmelCase_ = get_expected_output(_A )
    lowerCAmelCase_ = hf_model(inputs['''pixel_values'''] ).logits

    assert hf_logits.shape == torch.Size([1, 1000] )
    assert torch.allclose(hf_logits[0, 0:5], _A, atol=1E-3 )

    Path(_A ).mkdir(exist_ok=_A )
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}" )
    hf_model.save_pretrained(_A )


if __name__ == "__main__":
    _A = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--swiftformer_name''',
        default='''swiftformer_xs''',
        choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
        type=str,
        help='''Name of the SwiftFormer model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default='''./converted_outputs/''',
        type=str,
        help='''Path to the output PyTorch model directory.''',
    )
    parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')

    _A = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
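# An illustrative invocation of the conversion script above; the script file
# name and the checkpoint URL are assumptions, not taken from the repository.
#
#   python convert_swiftformer_original_to_hf.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt https://example.com/swiftformer_xs.pth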
from math import factorial, radians


def __UpperCamelCase ( _A, _A = 18, _A = 10 ):
    lowerCAmelCase_ = angle_in_degrees - ((angle_in_degrees // 3_6_0.0) * 3_6_0.0)

    # Converting from degrees to radians
    lowerCAmelCase_ = radians(_A )

    lowerCAmelCase_ = angle_in_radians
    lowerCAmelCase_ = 3
    lowerCAmelCase_ = -1

    for _ in range(_A ):
        result += (b * (angle_in_radians**a)) / factorial(_A )

        lowerCAmelCase_ = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(_A, _A )


if __name__ == "__main__":
    __import__('''doctest''').testmod()
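# A self-contained sketch of the same truncated Maclaurin series, with
# descriptive names (maclaurin_sin is an illustrative name, not from the file
# above), cross-checked against math.sin:
from math import factorial, radians, sin


def maclaurin_sin(angle_in_degrees: float, accuracy: int = 18) -> float:
    # sin(x) = x - x^3/3! + x^5/5! - ... for x in radians
    x = radians(angle_in_degrees % 360.0)
    result, sign = x, -1.0
    for exponent in range(3, 3 + 2 * (accuracy - 1), 2):
        result += sign * x**exponent / factorial(exponent)
        sign = -sign
    return result


assert abs(maclaurin_sin(30) - sin(radians(30))) < 1e-10  # sin(30 degrees) = 0.5
assert abs(maclaurin_sin(90) - 1.0) < 1e-10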
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


_A = logging.get_logger(__name__)

_A = {
    '''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''',
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class A ( __UpperCAmelCase ):
    __snake_case = 'vit'

    def __init__(
        self,
        UpperCamelCase__=768,
        UpperCamelCase__=12,
        UpperCamelCase__=12,
        UpperCamelCase__=3072,
        UpperCamelCase__="gelu",
        UpperCamelCase__=0.0,
        UpperCamelCase__=0.0,
        UpperCamelCase__=0.02,
        UpperCamelCase__=1E-12,
        UpperCamelCase__=224,
        UpperCamelCase__=16,
        UpperCamelCase__=3,
        UpperCamelCase__=True,
        UpperCamelCase__=16,
        **UpperCamelCase__,
    ):
        """simple docstring"""
        super().__init__(**UpperCamelCase__ )

        lowerCAmelCase_ = hidden_size
        lowerCAmelCase_ = num_hidden_layers
        lowerCAmelCase_ = num_attention_heads
        lowerCAmelCase_ = intermediate_size
        lowerCAmelCase_ = hidden_act
        lowerCAmelCase_ = hidden_dropout_prob
        lowerCAmelCase_ = attention_probs_dropout_prob
        lowerCAmelCase_ = initializer_range
        lowerCAmelCase_ = layer_norm_eps
        lowerCAmelCase_ = image_size
        lowerCAmelCase_ = patch_size
        lowerCAmelCase_ = num_channels
        lowerCAmelCase_ = qkv_bias
        lowerCAmelCase_ = encoder_stride


class A ( __UpperCAmelCase ):
    __snake_case = version.parse('1.11' )

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        return 1E-4
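# A hedged back-of-the-envelope check (not part of the module above): with the
# default image_size=224 and patch_size=16 from the config, a ViT encoder sees
# (224 // 16) ** 2 = 196 patch tokens, plus one [CLS] token.
image_size, patch_size = 224, 16
num_patches = (image_size // patch_size) ** 2
assert num_patches == 196
assert num_patches + 1 == 197  # sequence length including the [CLS] token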
from __future__ import annotations

import inspect
import unittest
from math import floor

import numpy as np

from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFCvtForImageClassification, TFCvtModel
    from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class A ( __UpperCAmelCase ):
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        lowerCAmelCase_ = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(UpperCamelCase__, '''embed_dim''' ) )
        self.parent.assertTrue(hasattr(UpperCamelCase__, '''num_heads''' ) )


class A :
    def __init__(
        self,
        UpperCamelCase__,
        UpperCamelCase__=13,
        UpperCamelCase__=64,
        UpperCamelCase__=3,
        UpperCamelCase__=[16, 48, 96],
        UpperCamelCase__=[1, 3, 6],
        UpperCamelCase__=[1, 2, 10],
        UpperCamelCase__=[7, 3, 3],
        UpperCamelCase__=[4, 2, 2],
        UpperCamelCase__=[2, 1, 1],
        UpperCamelCase__=[2, 2, 2],
        UpperCamelCase__=[False, False, True],
        UpperCamelCase__=[0.0, 0.0, 0.0],
        UpperCamelCase__=0.02,
        UpperCamelCase__=1E-12,
        UpperCamelCase__=True,
        UpperCamelCase__=True,
        UpperCamelCase__=2,
    ):
        """simple docstring"""
        lowerCAmelCase_ = parent
        lowerCAmelCase_ = batch_size
        lowerCAmelCase_ = image_size
        lowerCAmelCase_ = patch_sizes
        lowerCAmelCase_ = patch_stride
        lowerCAmelCase_ = patch_padding
        lowerCAmelCase_ = is_training
        lowerCAmelCase_ = use_labels
        lowerCAmelCase_ = num_labels
        lowerCAmelCase_ = num_channels
        lowerCAmelCase_ = embed_dim
        lowerCAmelCase_ = num_heads
        lowerCAmelCase_ = stride_kv
        lowerCAmelCase_ = depth
        lowerCAmelCase_ = cls_token
        lowerCAmelCase_ = attention_drop_rate
        lowerCAmelCase_ = initializer_range
        lowerCAmelCase_ = layer_norm_eps

    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        lowerCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCAmelCase_ = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            lowerCAmelCase_ = ids_tensor([self.batch_size], self.num_labels )

        lowerCAmelCase_ = self.get_config()
        return config, pixel_values, labels

    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
        """simple docstring"""
        lowerCAmelCase_ = TFCvtModel(config=UpperCamelCase__ )
        lowerCAmelCase_ = model(UpperCamelCase__, training=UpperCamelCase__ )
        lowerCAmelCase_ = (self.image_size, self.image_size)
        lowerCAmelCase_ , lowerCAmelCase_ = image_size[0], image_size[1]
        for i in range(len(self.depth ) ):
            lowerCAmelCase_ = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
            lowerCAmelCase_ = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width) )

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
        """simple docstring"""
        lowerCAmelCase_ = self.num_labels
        lowerCAmelCase_ = TFCvtForImageClassification(UpperCamelCase__ )
        lowerCAmelCase_ = model(UpperCamelCase__, labels=UpperCamelCase__, training=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        lowerCAmelCase_ = self.prepare_config_and_inputs()
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = config_and_inputs
        lowerCAmelCase_ = {'''pixel_values''': pixel_values}
        return config, inputs_dict


@require_tf
class A ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
    __snake_case = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    __snake_case = (
        {'feature-extraction': TFCvtModel, 'image-classification': TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    __snake_case = False
    __snake_case = False
    __snake_case = False
    __snake_case = False
    __snake_case = False

    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        lowerCAmelCase_ = TFCvtModelTester(self )
        lowerCAmelCase_ = TFCvtConfigTester(self, config_class=UpperCamelCase__, has_text_modality=UpperCamelCase__, hidden_size=37 )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    @unittest.skip(reason='''Cvt does not output attentions''' )
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        pass

    @unittest.skip(reason='''Cvt does not use inputs_embeds''' )
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        pass

    @unittest.skip(reason='''Cvt does not support input and output embeddings''' )
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0,
        reason='''TF does not support backprop for grouped convolutions on CPU.''',
    )
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        super().test_dataset_conversion()

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0,
        reason='''TF does not support backprop for grouped convolutions on CPU.''',
    )
    @slow
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        super().test_keras_fit()

    @unittest.skip(reason='''Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8''' )
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        lowerCAmelCase_ = tf.keras.mixed_precision.Policy('''mixed_float16''' )
        tf.keras.mixed_precision.set_global_policy(UpperCamelCase__ )
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy('''float32''' )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowerCAmelCase_ = model_class(UpperCamelCase__ )
            lowerCAmelCase_ = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCAmelCase_ = [*signature.parameters.keys()]

            lowerCAmelCase_ = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""

        def check_hidden_states_output(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
            lowerCAmelCase_ = model_class(UpperCamelCase__ )
            lowerCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__, UpperCamelCase__ ) )
            lowerCAmelCase_ = outputs.hidden_states
            lowerCAmelCase_ = len(self.model_tester.depth )
            self.assertEqual(len(UpperCamelCase__ ), UpperCamelCase__ )

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowerCAmelCase_ = True
            check_hidden_states_output(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCAmelCase_ = True
            check_hidden_states_output(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )

    @slow
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCAmelCase_ = TFCvtModel.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )


def __UpperCamelCase ( ):
    lowerCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image


@require_tf
@require_vision
class A ( unittest.TestCase ):
    @cached_property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )

    @slow
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        lowerCAmelCase_ = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )

        lowerCAmelCase_ = self.default_image_processor
        lowerCAmelCase_ = prepare_img()
        lowerCAmelCase_ = image_processor(images=UpperCamelCase__, return_tensors='''tf''' )

        # forward pass
        lowerCAmelCase_ = model(**UpperCamelCase__ )

        # verify the logits
        lowerCAmelCase_ = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape, UpperCamelCase__ )

        lowerCAmelCase_ = tf.constant([0.9_285, 0.9_015, -0.3_150] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), UpperCamelCase__, atol=1E-4 ) )
import io
import json

import fsspec
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def __UpperCamelCase ( _A, _A ):
    assert isinstance(_A, _A )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize('''keep_in_memory''', [False, True] )
def __UpperCamelCase ( _A, _A, _A ):
    lowerCAmelCase_ = tmp_path / '''cache'''
    lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        lowerCAmelCase_ = JsonDatasetReader(_A, cache_dir=_A, keep_in_memory=_A ).read()
    _check_json_dataset(_A, _A )


@pytest.mark.parametrize(
    '''features''',
    [
        None,
        {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
        {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
        {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
        {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
    ],
)
def __UpperCamelCase ( _A, _A, _A ):
    lowerCAmelCase_ = tmp_path / '''cache'''
    lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    lowerCAmelCase_ = features.copy() if features else default_expected_features
    lowerCAmelCase_ = (
        Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
    )
    lowerCAmelCase_ = JsonDatasetReader(_A, features=_A, cache_dir=_A ).read()
    _check_json_dataset(_A, _A )


@pytest.mark.parametrize(
    '''features''',
    [
        None,
        {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
    ],
)
def __UpperCamelCase ( _A, _A, _A ):
    lowerCAmelCase_ = tmp_path / '''cache'''
    lowerCAmelCase_ = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
    lowerCAmelCase_ = features.copy() if features else default_expected_features
    lowerCAmelCase_ = (
        Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
    )
    lowerCAmelCase_ = JsonDatasetReader(_A, features=_A, cache_dir=_A ).read()
    assert isinstance(_A, _A )
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def __UpperCamelCase ( _A, _A ):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    lowerCAmelCase_ = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
    lowerCAmelCase_ = features.copy()
    lowerCAmelCase_ = (
        Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
    )
    lowerCAmelCase_ = tmp_path / '''cache'''
    lowerCAmelCase_ = JsonDatasetReader(_A, features=_A, cache_dir=_A ).read()
    assert isinstance(_A, _A )
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __UpperCamelCase ( _A, _A, _A ):
    lowerCAmelCase_ = tmp_path / '''cache'''
    lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    lowerCAmelCase_ = JsonDatasetReader(_A, cache_dir=_A, split=_A ).read()
    _check_json_dataset(_A, _A )
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize('''path_type''', [str, list] )
def __UpperCamelCase ( _A, _A, _A ):
    if issubclass(_A, _A ):
        lowerCAmelCase_ = jsonl_path
    elif issubclass(_A, _A ):
        lowerCAmelCase_ = [jsonl_path]
    lowerCAmelCase_ = tmp_path / '''cache'''
    lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    lowerCAmelCase_ = JsonDatasetReader(_A, cache_dir=_A ).read()
    _check_json_dataset(_A, _A )


def __UpperCamelCase ( _A, _A, _A=("train",) ):
    assert isinstance(_A, _A )
    for split in splits:
        lowerCAmelCase_ = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize('''keep_in_memory''', [False, True] )
def __UpperCamelCase ( _A, _A, _A ):
    lowerCAmelCase_ = tmp_path / '''cache'''
    lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        lowerCAmelCase_ = JsonDatasetReader({'''train''': jsonl_path}, cache_dir=_A, keep_in_memory=_A ).read()
    _check_json_datasetdict(_A, _A )


@pytest.mark.parametrize(
    '''features''',
    [
        None,
        {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
        {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
        {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
        {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
    ],
)
def __UpperCamelCase ( _A, _A, _A ):
    lowerCAmelCase_ = tmp_path / '''cache'''
    lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    lowerCAmelCase_ = features.copy() if features else default_expected_features
    lowerCAmelCase_ = (
        Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
    )
    lowerCAmelCase_ = JsonDatasetReader({'''train''': jsonl_path}, features=_A, cache_dir=_A ).read()
    _check_json_datasetdict(_A, _A )


@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __UpperCamelCase ( _A, _A, _A ):
    if split:
        lowerCAmelCase_ = {split: jsonl_path}
    else:
        lowerCAmelCase_ = '''train'''
        lowerCAmelCase_ = {'''train''': jsonl_path, '''test''': jsonl_path}
    lowerCAmelCase_ = tmp_path / '''cache'''
    lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    lowerCAmelCase_ = JsonDatasetReader(_A, cache_dir=_A ).read()
    _check_json_datasetdict(_A, _A, splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )


def __UpperCamelCase ( _A ):
    return json.load(_A )


def __UpperCamelCase ( _A ):
    return [json.loads(_A ) for line in buffer]


class A :
    @pytest.mark.parametrize('''lines, load_json_function''', [(True, load_json_lines), (False, load_json)] )
    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
        """simple docstring"""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, lines=UpperCamelCase__ ).write()
            buffer.seek(0 )
            lowerCAmelCase_ = load_json_function(UpperCamelCase__ )
        assert isinstance(UpperCamelCase__, UpperCamelCase__ )
        assert isinstance(exported_content[0], UpperCamelCase__ )
        assert len(UpperCamelCase__ ) == 10

    @pytest.mark.parametrize(
        '''orient, container, keys, len_at''',
        [
            ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
            ('''split''', dict, {'''columns''', '''data'''}, '''data'''),
            ('''index''', dict, set('''0123456789''' ), None),
            ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
            ('''values''', list, None, None),
            ('''table''', dict, {'''schema''', '''data'''}, '''data'''),
        ],
    )
    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
        """simple docstring"""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, lines=UpperCamelCase__, orient=UpperCamelCase__ ).write()
            buffer.seek(0 )
            lowerCAmelCase_ = load_json(UpperCamelCase__ )
        assert isinstance(UpperCamelCase__, UpperCamelCase__ )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(UpperCamelCase__, '''keys''' ) and not hasattr(exported_content[0], '''keys''' )
        if len_at:
            assert len(exported_content[len_at] ) == 10
        else:
            assert len(UpperCamelCase__ ) == 10

    @pytest.mark.parametrize('''lines, load_json_function''', [(True, load_json_lines), (False, load_json)] )
    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
        """simple docstring"""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, lines=UpperCamelCase__, num_proc=2 ).write()
            buffer.seek(0 )
            lowerCAmelCase_ = load_json_function(UpperCamelCase__ )
        assert isinstance(UpperCamelCase__, UpperCamelCase__ )
        assert isinstance(exported_content[0], UpperCamelCase__ )
        assert len(UpperCamelCase__ ) == 10

    @pytest.mark.parametrize(
        '''orient, container, keys, len_at''',
        [
            ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
            ('''split''', dict, {'''columns''', '''data'''}, '''data'''),
            ('''index''', dict, set('''0123456789''' ), None),
            ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
            ('''values''', list, None, None),
            ('''table''', dict, {'''schema''', '''data'''}, '''data'''),
        ],
    )
    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
        """simple docstring"""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, lines=UpperCamelCase__, orient=UpperCamelCase__, num_proc=2 ).write()
            buffer.seek(0 )
            lowerCAmelCase_ = load_json(UpperCamelCase__ )
        assert isinstance(UpperCamelCase__, UpperCamelCase__ )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(UpperCamelCase__, '''keys''' ) and not hasattr(exported_content[0], '''keys''' )
        if len_at:
            assert len(exported_content[len_at] ) == 10
        else:
            assert len(UpperCamelCase__ ) == 10

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
        """simple docstring"""
        with pytest.raises(UpperCamelCase__ ):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, num_proc=0 )

    @pytest.mark.parametrize('''compression, extension''', [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
        """simple docstring"""
        lowerCAmelCase_ = tmp_path_factory.mktemp('''data''' ) / f"test.json.{extension}"
        lowerCAmelCase_ = str(shared_datadir / f"test_file.json.{extension}" )
        JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, compression=UpperCamelCase__ ).write()

        with fsspec.open(UpperCamelCase__, '''rb''', compression='''infer''' ) as f:
            lowerCAmelCase_ = f.read()
        with fsspec.open(UpperCamelCase__, '''rb''', compression='''infer''' ) as f:
            lowerCAmelCase_ = f.read()
        assert exported_content == original_content
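# A hedged usage sketch of the writer API exercised above; the in-memory
# dataset contents are illustrative assumptions, not fixtures from the tests.
import io

from datasets import Dataset
from datasets.io.json import JsonDatasetWriter


ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
with io.BytesIO() as buffer:
    JsonDatasetWriter(ds, buffer, lines=True).write()
    buffer.seek(0)
    print(buffer.read().decode("utf-8"))  # one JSON object per line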
import gc
import math
import unittest

import torch

from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


_A = logging.get_logger(__name__)

enable_full_determinism()


class A ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
    __snake_case = UNetaDModel
    __snake_case = 'sample'

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        lowerCAmelCase_ = 4
        lowerCAmelCase_ = 3
        lowerCAmelCase_ = (32, 32)

        lowerCAmelCase_ = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
        lowerCAmelCase_ = torch.tensor([10] ).to(UpperCamelCase__ )

        return {"sample": noise, "timestep": time_step}

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        return (3, 32, 32)

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        return (3, 32, 32)

    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        lowerCAmelCase_ = {
            '''block_out_channels''': (32, 64),
            '''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''),
            '''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''),
            '''attention_head_dim''': 3,
            '''out_channels''': 3,
            '''in_channels''': 3,
            '''layers_per_block''': 2,
            '''sample_size''': 32,
        }
        lowerCAmelCase_ = self.dummy_input
        return init_dict, inputs_dict


class A ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
    __snake_case = UNetaDModel
    __snake_case = 'sample'

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        lowerCAmelCase_ = 4
        lowerCAmelCase_ = 4
        lowerCAmelCase_ = (32, 32)

        lowerCAmelCase_ = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
        lowerCAmelCase_ = torch.tensor([10] ).to(UpperCamelCase__ )

        return {"sample": noise, "timestep": time_step}

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        return (4, 32, 32)

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        return (4, 32, 32)

    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        lowerCAmelCase_ = {
            '''sample_size''': 32,
            '''in_channels''': 4,
            '''out_channels''': 4,
            '''layers_per_block''': 2,
            '''block_out_channels''': (32, 64),
            '''attention_head_dim''': 32,
            '''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''),
            '''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''),
        }
        lowerCAmelCase_ = self.dummy_input
        return init_dict, inputs_dict

    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        lowerCAmelCase_ , lowerCAmelCase_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''', output_loading_info=UpperCamelCase__ )
        self.assertIsNotNone(UpperCamelCase__ )
        self.assertEqual(len(loading_info['''missing_keys'''] ), 0 )

        model.to(UpperCamelCase__ )
        lowerCAmelCase_ = model(**self.dummy_input ).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != '''cuda''', '''This test is supposed to run on GPU''' )
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        lowerCAmelCase_ , lowerCAmelCase_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''', output_loading_info=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        lowerCAmelCase_ = model(**self.dummy_input ).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != '''cuda''', '''This test is supposed to run on GPU''' )
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        lowerCAmelCase_ , lowerCAmelCase_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''', output_loading_info=UpperCamelCase__ )
        model_accelerate.to(UpperCamelCase__ )
        model_accelerate.eval()

        lowerCAmelCase_ = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0 ),
        )
        lowerCAmelCase_ = noise.to(UpperCamelCase__ )
        lowerCAmelCase_ = torch.tensor([10] * noise.shape[0] ).to(UpperCamelCase__ )

        lowerCAmelCase_ = model_accelerate(UpperCamelCase__, UpperCamelCase__ )['''sample''']

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        lowerCAmelCase_ , lowerCAmelCase_ = UNetaDModel.from_pretrained(
            '''fusing/unet-ldm-dummy-update''', output_loading_info=UpperCamelCase__, low_cpu_mem_usage=UpperCamelCase__
        )
        model_normal_load.to(UpperCamelCase__ )
        model_normal_load.eval()
        lowerCAmelCase_ = model_normal_load(UpperCamelCase__, UpperCamelCase__ )['''sample''']

        assert torch_all_close(UpperCamelCase__, UpperCamelCase__, rtol=1E-3 )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        lowerCAmelCase_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' )
        model.eval()
        model.to(UpperCamelCase__ )

        lowerCAmelCase_ = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0 ),
        )
        lowerCAmelCase_ = noise.to(UpperCamelCase__ )
        lowerCAmelCase_ = torch.tensor([10] * noise.shape[0] ).to(UpperCamelCase__ )

        with torch.no_grad():
            lowerCAmelCase_ = model(UpperCamelCase__, UpperCamelCase__ ).sample

        lowerCAmelCase_ = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        lowerCAmelCase_ = torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800] )
        # fmt: on

        self.assertTrue(torch_all_close(UpperCamelCase__, UpperCamelCase__, rtol=1E-3 ) )


class A ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
    __snake_case = UNetaDModel
    __snake_case = 'sample'

    @property
    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__=(32, 32) ):
        """simple docstring"""
        lowerCAmelCase_ = 4
        lowerCAmelCase_ = 3

        lowerCAmelCase_ = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
        lowerCAmelCase_ = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa, device=UpperCamelCase__ )

        return {"sample": noise, "timestep": time_step}

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        return (3, 32, 32)

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        return (3, 32, 32)

    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        lowerCAmelCase_ = {
            '''block_out_channels''': [32, 64, 64, 64],
            '''in_channels''': 3,
            '''layers_per_block''': 1,
            '''out_channels''': 3,
            '''time_embedding_type''': '''fourier''',
            '''norm_eps''': 1E-6,
            '''mid_block_scale_factor''': math.sqrt(2.0 ),
            '''norm_num_groups''': None,
            '''down_block_types''': [
                '''SkipDownBlock2D''',
                '''AttnSkipDownBlock2D''',
                '''SkipDownBlock2D''',
                '''SkipDownBlock2D''',
            ],
            '''up_block_types''': [
                '''SkipUpBlock2D''',
                '''SkipUpBlock2D''',
                '''AttnSkipUpBlock2D''',
                '''SkipUpBlock2D''',
            ],
        }
        lowerCAmelCase_ = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        lowerCAmelCase_ , lowerCAmelCase_ = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''', output_loading_info=UpperCamelCase__ )
        self.assertIsNotNone(UpperCamelCase__ )
        self.assertEqual(len(loading_info['''missing_keys'''] ), 0 )

        model.to(UpperCamelCase__ )
        lowerCAmelCase_ = self.dummy_input
        lowerCAmelCase_ = floats_tensor((4, 3) + (256, 256) ).to(UpperCamelCase__ )
        lowerCAmelCase_ = noise
        lowerCAmelCase_ = model(**UpperCamelCase__ )

        assert image is not None, "Make sure output is not None"

    @slow
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        lowerCAmelCase_ = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' )
        model.to(UpperCamelCase__ )

        lowerCAmelCase_ = 4
        lowerCAmelCase_ = 3
        lowerCAmelCase_ = (256, 256)

        lowerCAmelCase_ = torch.ones((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
        lowerCAmelCase_ = torch.tensor(batch_size * [1E-4] ).to(UpperCamelCase__ )

        with torch.no_grad():
            lowerCAmelCase_ = model(UpperCamelCase__, UpperCamelCase__ ).sample

        lowerCAmelCase_ = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        lowerCAmelCase_ = torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7_608] )
        # fmt: on

        self.assertTrue(torch_all_close(UpperCamelCase__, UpperCamelCase__, rtol=1E-2 ) )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        lowerCAmelCase_ = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' )
        model.to(UpperCamelCase__ )

        lowerCAmelCase_ = 4
        lowerCAmelCase_ = 3
        lowerCAmelCase_ = (32, 32)

        lowerCAmelCase_ = torch.ones((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
        lowerCAmelCase_ = torch.tensor(batch_size * [1E-4] ).to(UpperCamelCase__ )

        with torch.no_grad():
            lowerCAmelCase_ = model(UpperCamelCase__, UpperCamelCase__ ).sample

        lowerCAmelCase_ = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        lowerCAmelCase_ = torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256] )
        # fmt: on

        self.assertTrue(torch_all_close(UpperCamelCase__, UpperCamelCase__, rtol=1E-2 ) )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        pass
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union

import torch

from ..utils import BaseOutput


_A = '''scheduler_config.json'''


class A ( __UpperCAmelCase ):
    __snake_case = 1
    __snake_case = 2
    __snake_case = 3
    __snake_case = 4
    __snake_case = 5
    __snake_case = 6
    __snake_case = 7
    __snake_case = 8
    __snake_case = 9
    __snake_case = 10
    __snake_case = 11
    __snake_case = 12
    __snake_case = 13
    __snake_case = 14


@dataclass
class A ( __UpperCAmelCase ):
    __snake_case = 42


class A :
    __snake_case = SCHEDULER_CONFIG_NAME
    __snake_case = []
    __snake_case = True

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls, UpperCamelCase__ = None, UpperCamelCase__ = None, UpperCamelCase__=False, **UpperCamelCase__, ):
        """simple docstring"""
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = cls.load_config(
            pretrained_model_name_or_path=UpperCamelCase__,
            subfolder=UpperCamelCase__,
            return_unused_kwargs=UpperCamelCase__,
            return_commit_hash=UpperCamelCase__,
            **UpperCamelCase__,
        )
        return cls.from_config(UpperCamelCase__, return_unused_kwargs=UpperCamelCase__, **UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = False, **UpperCamelCase__ ):
        """simple docstring"""
        self.save_config(save_directory=UpperCamelCase__, push_to_hub=UpperCamelCase__, **UpperCamelCase__ )

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        return self._get_compatibles()

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls ):
        """simple docstring"""
        lowerCAmelCase_ = list(set([cls.__name__] + cls._compatibles ) )
        lowerCAmelCase_ = importlib.import_module(__name__.split('''.''' )[0] )
        lowerCAmelCase_ = [
            getattr(UpperCamelCase__, UpperCamelCase__ ) for c in compatible_classes_str if hasattr(UpperCamelCase__, UpperCamelCase__ )
        ]
        return compatible_classes
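# A hedged restatement of the compatibles lookup above, with illustrative names
# (resolve_compatible_classes is not part of the module): class names are
# resolved against the top-level package via importlib, and names the package
# does not export are silently skipped.
import importlib


def resolve_compatible_classes(class_names, package_name):
    # import e.g. "diffusers" and keep only the names it actually defines
    module = importlib.import_module(package_name)
    return [getattr(module, name) for name in class_names if hasattr(module, name)]


# e.g. resolve_compatible_classes(["DDPMScheduler", "NoSuchScheduler"], "diffusers")
# returns just [DDPMScheduler]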
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def __UpperCamelCase ( _A = 3 ):
    if isinstance(_A, str ):
        raise TypeError('''number of qubits must be an integer.''' )
    if number_of_qubits <= 0:
        raise ValueError('''number of qubits must be > 0.''' )
    if math.floor(_A ) != number_of_qubits:
        raise ValueError('''number of qubits must be exact integer.''' )
    if number_of_qubits > 10:
        raise ValueError('''number of qubits too large to simulate(>10).''' )

    lowerCAmelCase_ = QuantumRegister(_A, '''qr''' )
    lowerCAmelCase_ = ClassicalRegister(_A, '''cr''' )

    lowerCAmelCase_ = QuantumCircuit(_A, _A )

    lowerCAmelCase_ = number_of_qubits

    for i in range(_A ):
        quantum_circuit.h(number_of_qubits - i - 1 )
        counter -= 1
        for j in range(_A ):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), _A, _A )

    for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(_A, number_of_qubits - k - 1 )

    # measure all the qubits
    quantum_circuit.measure(_A, _A )
    # simulate with 10000 shots
    lowerCAmelCase_ = Aer.get_backend('''qasm_simulator''' )
    lowerCAmelCase_ = execute(_A, _A, shots=10000 )

    return job.result().get_counts(_A )


if __name__ == "__main__":
    print(
        f"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
    )
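# A hedged usage sketch (not part of the original file): the QFT of the
# all-zeros input state is a uniform superposition, so the 10000 measured shots
# should spread roughly evenly over all 2**n basis states. The call below uses
# the obfuscated function name defined in this file.
counts = __UpperCamelCase(3)

assert sum(counts.values()) == 10000
# with 3 qubits there are 8 basis states, so roughly 1250 shots each
assert all(abs(v - 1250) < 250 for v in counts.values())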
import unittest

import numpy as np
import torch

from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class A ( unittest.TestCase ):
    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        lowerCAmelCase_ = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=('''DownBlock2D''', '''AttnDownBlock2D'''),
            up_block_types=('''AttnUpBlock2D''', '''UpBlock2D'''),
        )
        return model

    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        lowerCAmelCase_ = self.dummy_uncond_unet
        lowerCAmelCase_ = KarrasVeScheduler()

        lowerCAmelCase_ = KarrasVePipeline(unet=UpperCamelCase__, scheduler=UpperCamelCase__ )
        pipe.to(UpperCamelCase__ )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )

        lowerCAmelCase_ = torch.manual_seed(0 )
        lowerCAmelCase_ = pipe(num_inference_steps=2, generator=UpperCamelCase__, output_type='''numpy''' ).images

        lowerCAmelCase_ = torch.manual_seed(0 )
        lowerCAmelCase_ = pipe(num_inference_steps=2, generator=UpperCamelCase__, output_type='''numpy''', return_dict=UpperCamelCase__ )[0]

        lowerCAmelCase_ = image[0, -3:, -3:, -1]
        lowerCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        lowerCAmelCase_ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2


@slow
@require_torch
class A ( unittest.TestCase ):
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        lowerCAmelCase_ = '''google/ncsnpp-celebahq-256'''
        lowerCAmelCase_ = UNetaDModel.from_pretrained(UpperCamelCase__ )
        lowerCAmelCase_ = KarrasVeScheduler()

        lowerCAmelCase_ = KarrasVePipeline(unet=UpperCamelCase__, scheduler=UpperCamelCase__ )
        pipe.to(UpperCamelCase__ )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )

        lowerCAmelCase_ = torch.manual_seed(0 )
        lowerCAmelCase_ = pipe(num_inference_steps=20, generator=UpperCamelCase__, output_type='''numpy''' ).images

        lowerCAmelCase_ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        lowerCAmelCase_ = np.array([0.578, 0.5_811, 0.5_924, 0.5_809, 0.587, 0.5_886, 0.5_861, 0.5_802, 0.586] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
import unittest

from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


_A = get_tests_dir('''fixtures/test_sentencepiece.model''')


@require_sentencepiece
@require_tokenizers
class A ( __UpperCAmelCase , unittest.TestCase ):
    __snake_case = XLNetTokenizer
    __snake_case = XLNetTokenizerFast
    __snake_case = True
    __snake_case = True

    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        super().setUp()

        # We have a SentencePiece fixture for testing
        lowerCAmelCase_ = XLNetTokenizer(UpperCamelCase__, keep_accents=UpperCamelCase__ )
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        lowerCAmelCase_ = '''<s>'''
        lowerCAmelCase_ = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ), UpperCamelCase__ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ), UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        lowerCAmelCase_ = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0], '''<unk>''' )
        self.assertEqual(vocab_keys[1], '''<s>''' )
        self.assertEqual(vocab_keys[-1], '''<eod>''' )
        self.assertEqual(len(UpperCamelCase__ ), 1006 )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size, 1000 )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        lowerCAmelCase_ = XLNetTokenizer(UpperCamelCase__, keep_accents=UpperCamelCase__ )

        lowerCAmelCase_ = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(UpperCamelCase__, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )

        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ), [285, 46, 10, 170, 382] )

        lowerCAmelCase_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            UpperCamelCase__,
            [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''é''',
                '''.''',
            ],
        )
        lowerCAmelCase_ = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] )

        lowerCAmelCase_ = tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
        self.assertListEqual(
            UpperCamelCase__,
            [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''<unk>''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''<unk>''',
                '''.''',
            ],
        )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        lowerCAmelCase_ = XLNetTokenizer(UpperCamelCase__, do_lower_case=UpperCamelCase__ )
        lowerCAmelCase_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            UpperCamelCase__,
            [
                SPIECE_UNDERLINE + '''''',
                '''i''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''se''',
                '''.''',
            ],
        )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ), ['''▁he''', '''ll''', '''o'''] )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        lowerCAmelCase_ = XLNetTokenizer(UpperCamelCase__, do_lower_case=UpperCamelCase__ )
        lowerCAmelCase_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            UpperCamelCase__,
            [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''se''',
                '''.''',
            ],
        )

    @slow
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        lowerCAmelCase_ = XLNetTokenizer.from_pretrained('''xlnet-base-cased''' )

        lowerCAmelCase_ = tokenizer.encode('''sequence builders''', add_special_tokens=UpperCamelCase__ )
        lowerCAmelCase_ = tokenizer.encode('''multi-sequence build''', add_special_tokens=UpperCamelCase__ )

        lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ )
        lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__, UpperCamelCase__ )

        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_a + [4, 3]

    @slow
    def SCREAMING_SNAKE_CASE__ ( self ):
        """simple docstring"""
        # fmt: off
        lowerCAmelCase_ = {'''input_ids''': [[17, 2_1442, 270, 17, 10, 1_4645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 2_2018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 1_4431, 13, 5500, 11, 1176, 580, 13, 1_6819, 4797, 23, 17, 10, 1_7135, 658, 19, 457, 7932, 13, 184, 19, 3154, 1_7135, 6468, 19, 1404, 1_2269, 19, 4229, 5356, 1_6264, 46, 19, 17, 2_0545, 1_0395, 9, 9, 9, 11, 28, 6421, 9531, 2_0729, 17, 10, 353, 1_7022, 11, 21, 6421, 9531, 1_6949, 17, 10, 1_1509, 753, 11, 33, 95, 2421, 7385, 956, 1_4431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 2_4738, 19, 1_3203, 658, 218, 787, 21, 430, 1_8482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 2_2178, 27, 1064, 22, 956, 13, 1_1101, 1429, 5854, 2_4313, 1_8953, 40, 422, 2_4366, 68, 1758, 37, 1_0483, 1_4257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 1_3894, 3380, 23, 95, 18, 1_7634, 2288, 9, 4, 3]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase__,
            model_name='''xlnet-base-cased''',
            revision='''c841166438c31ec7ca9a106dee7bb312b73ae511''',
        )
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)

try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VivitImageProcessor class A ( unittest.TestCase ): def __init__( self, UpperCamelCase__, UpperCamelCase__=7, UpperCamelCase__=3, UpperCamelCase__=10, UpperCamelCase__=18, UpperCamelCase__=30, UpperCamelCase__=400, UpperCamelCase__=True, UpperCamelCase__=None, UpperCamelCase__=True, UpperCamelCase__=[0.5, 0.5, 0.5], UpperCamelCase__=[0.5, 0.5, 0.5], UpperCamelCase__=None, ): """simple docstring""" lowerCAmelCase_ = size if size is not None else {'''shortest_edge''': 18} lowerCAmelCase_ = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} lowerCAmelCase_ = parent lowerCAmelCase_ = batch_size lowerCAmelCase_ = num_channels lowerCAmelCase_ = num_frames lowerCAmelCase_ = image_size lowerCAmelCase_ = min_resolution lowerCAmelCase_ = max_resolution lowerCAmelCase_ = do_resize lowerCAmelCase_ = size lowerCAmelCase_ = do_normalize lowerCAmelCase_ = image_mean lowerCAmelCase_ = image_std lowerCAmelCase_ = crop_size def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class A ( __UpperCAmelCase , unittest.TestCase ): __snake_case = VivitImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = VivitImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase__, '''image_mean''' ) ) self.assertTrue(hasattr(UpperCamelCase__, '''image_std''' ) ) self.assertTrue(hasattr(UpperCamelCase__, '''do_normalize''' ) ) self.assertTrue(hasattr(UpperCamelCase__, '''do_resize''' ) ) self.assertTrue(hasattr(UpperCamelCase__, '''do_center_crop''' ) ) self.assertTrue(hasattr(UpperCamelCase__, '''size''' ) ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {'''shortest_edge''': 18} ) self.assertEqual(image_processor.crop_size, {'''height''': 18, '''width''': 18} ) lowerCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 ) self.assertEqual(image_processor.size, {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84} ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict ) # create random PIL videos lowerCAmelCase_ = prepare_video_inputs(self.image_processor_tester, equal_resolution=UpperCamelCase__ ) for video in video_inputs: self.assertIsInstance(UpperCamelCase__, UpperCamelCase__ ) self.assertIsInstance(video[0], Image.Image ) # Test not batched input lowerCAmelCase_ = image_processing(video_inputs[0], return_tensors='''pt''' ).pixel_values 
self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ), ) # Test batched lowerCAmelCase_ = image_processing(UpperCamelCase__, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ), ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase_ = prepare_video_inputs(self.image_processor_tester, equal_resolution=UpperCamelCase__, numpify=UpperCamelCase__ ) for video in video_inputs: self.assertIsInstance(UpperCamelCase__, UpperCamelCase__ ) self.assertIsInstance(video[0], np.ndarray ) # Test not batched input lowerCAmelCase_ = image_processing(video_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ), ) # Test batched lowerCAmelCase_ = image_processing(UpperCamelCase__, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ), ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase_ = prepare_video_inputs(self.image_processor_tester, equal_resolution=UpperCamelCase__, torchify=UpperCamelCase__ ) for video in video_inputs: self.assertIsInstance(UpperCamelCase__, UpperCamelCase__ ) self.assertIsInstance(video[0], torch.Tensor ) # Test not batched input lowerCAmelCase_ = image_processing(video_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ), ) # Test batched lowerCAmelCase_ = image_processing(UpperCamelCase__, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ), )
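# --- Added usage sketch (illustrative, not part of the original tests) ---
# The assertions above check a (batch, frames, channels, height, width) layout.
# This sketch reproduces it with a random 10-frame clip instead of the test
# fixtures; the frame size and processor settings below are arbitrary choices.
import numpy as np
from transformers import VivitImageProcessor

def _vivit_shape_demo() -> None:
    processor = VivitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
    video = [np.random.randint(0, 256, (30, 40, 3), dtype=np.uint8) for _ in range(10)]
    pixel_values = processor(video, return_tensors="pt").pixel_values
    assert pixel_values.shape == (1, 10, 3, 18, 18)  # one video, 10 frames, 18x18 crops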
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be an exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate (>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(f"Total count for quantum fourier transform state is: {quantum_fourier_transform(3)}")
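# --- Added usage sketch (illustrative) ---
# Applied to the all-zero register prepared above, the QFT yields a uniform
# superposition, so every n-bit string should show up in roughly 1/2**n of the
# 10000 shots (about 0.125 each for the default n = 3).
def _qft_demo(number_of_qubits: int = 3) -> None:
    counts = quantum_fourier_transform(number_of_qubits)
    total = sum(counts.values())
    for state, hits in sorted(counts.items()):
        print(f"{state}: {hits / total:.3f}")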
from json import JSONDecodeError  # Workaround for requests.exceptions.JSONDecodeError

import requests


def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException
            print(f"Sorry, there are no results for ISBN: {isbn}.")
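# --- Added usage sketch (illustrative; needs network access) ---
# End-to-end fetch of one record using the module's own default olid, avoiding
# the interactive loop above.
def _openlibrary_demo() -> None:
    summary = summarize_book(get_openlibrary_data("isbn/0140328726"))
    for key, value in summary.items():
        print(f"{key}: {value}")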
from functools import lru_cache


@lru_cache
def factorial(num: int) -> int:
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
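# --- Added note (illustrative) ---
# @lru_cache makes the recursion memoized: the first factorial(5) call misses
# the cache five times (for 5, 4, 3, 2, 1), and a repeat call is a single hit.
def _factorial_cache_demo() -> None:
    assert factorial(5) == 120
    factorial(5)  # answered from the cache
    print(factorial.cache_info())  # e.g. CacheInfo(hits=1, misses=5, ...)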
from collections import defaultdict class A : def __init__( self, UpperCamelCase__, UpperCamelCase__ ): """simple docstring""" lowerCAmelCase_ = total # total no of tasks (N) # DP table will have a dimension of (2^M)*N # initially all values are set to -1 lowerCAmelCase_ = [ [-1 for i in range(total + 1 )] for j in range(2 ** len(UpperCamelCase__ ) ) ] lowerCAmelCase_ = defaultdict(UpperCamelCase__ ) # stores the list of persons for each task # final_mask is used to check if all persons are included by setting all bits # to 1 lowerCAmelCase_ = (1 << len(UpperCamelCase__ )) - 1 def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ ): """simple docstring""" if mask == self.final_mask: return 1 # if not everyone gets the task and no more tasks are available, return 0 if task_no > self.total_tasks: return 0 # if case already considered if self.dp[mask][task_no] != -1: return self.dp[mask][task_no] # Number of ways when we don't this task in the arrangement lowerCAmelCase_ = self.count_ways_until(UpperCamelCase__, task_no + 1 ) # now assign the tasks one by one to all possible persons and recursively # assign for the remaining tasks. if task_no in self.task: for p in self.task[task_no]: # if p is already given a task if mask & (1 << p): continue # assign this task to p and change the mask value. And recursively # assign tasks with the new mask value. total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1 ) # save the value. lowerCAmelCase_ = total_ways_util return self.dp[mask][task_no] def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ): """simple docstring""" for i in range(len(UpperCamelCase__ ) ): for j in task_performed[i]: self.task[j].append(UpperCamelCase__ ) # call the function to fill the DP table, final answer is stored in dp[0][1] return self.count_ways_until(0, 1 ) if __name__ == "__main__": _A = 5 # total no of tasks (the value of N) # the list of tasks that can be done by M persons. _A = [[1, 3, 4], [1, 2, 5], [3, 4]] print( AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways( task_performed ) )
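# --- Added usage sketch (illustrative) ---
# A hand-checkable instance: two people who can each perform task 1 or task 2.
# Every person must receive a distinct task, so the only complete assignments
# are (p0 -> t1, p1 -> t2) and (p0 -> t2, p1 -> t1), i.e. 2 ways.
def _assignment_demo() -> None:
    task_performed = [[1, 2], [1, 2]]
    assert AssignmentUsingBitmask(task_performed, 2).count_no_of_ways(task_performed) == 2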
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def __UpperCamelCase ( _A ): lowerCAmelCase_ = 384 lowerCAmelCase_ = 7 if "tiny" in model_name: lowerCAmelCase_ = 96 lowerCAmelCase_ = (2, 2, 6, 2) lowerCAmelCase_ = (3, 6, 12, 24) elif "small" in model_name: lowerCAmelCase_ = 96 lowerCAmelCase_ = (2, 2, 18, 2) lowerCAmelCase_ = (3, 6, 12, 24) elif "base" in model_name: lowerCAmelCase_ = 128 lowerCAmelCase_ = (2, 2, 18, 2) lowerCAmelCase_ = (4, 8, 16, 32) lowerCAmelCase_ = 12 lowerCAmelCase_ = 512 elif "large" in model_name: lowerCAmelCase_ = 192 lowerCAmelCase_ = (2, 2, 18, 2) lowerCAmelCase_ = (6, 12, 24, 48) lowerCAmelCase_ = 12 lowerCAmelCase_ = 768 # set label information lowerCAmelCase_ = 150 lowerCAmelCase_ = '''huggingface/label-files''' lowerCAmelCase_ = '''ade20k-id2label.json''' lowerCAmelCase_ = json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) ) lowerCAmelCase_ = {int(_A ): v for k, v in idalabel.items()} lowerCAmelCase_ = {v: k for k, v in idalabel.items()} lowerCAmelCase_ = SwinConfig( embed_dim=_A , depths=_A , num_heads=_A , window_size=_A , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , ) lowerCAmelCase_ = UperNetConfig( backbone_config=_A , auxiliary_in_channels=_A , num_labels=_A , idalabel=_A , labelaid=_A , ) return config def __UpperCamelCase ( _A ): lowerCAmelCase_ = [] # fmt: off # stem rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight''') ) rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias''') ) rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight''') ) rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias''') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") ) 
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight") ) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias") ) if i < 3: rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight") ) rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight") ) rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias") ) rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight") ) rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias") ) # decode head rename_keys.extend( [ ('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''), ('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''), ('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''), ('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''), ] ) # fmt: on return rename_keys def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = dct.pop(_A ) lowerCAmelCase_ = val def __UpperCamelCase ( _A , _A ): lowerCAmelCase_ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): lowerCAmelCase_ = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) lowerCAmelCase_ = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight" ) lowerCAmelCase_ = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict lowerCAmelCase_ = in_proj_weight[:dim, :] lowerCAmelCase_ = in_proj_bias[: dim] lowerCAmelCase_ = in_proj_weight[ dim : dim * 2, : ] lowerCAmelCase_ = in_proj_bias[ dim : dim * 2 ] lowerCAmelCase_ = in_proj_weight[ -dim :, : ] lowerCAmelCase_ = in_proj_bias[-dim :] # fmt: on def __UpperCamelCase ( _A ): lowerCAmelCase_ , lowerCAmelCase_ = x.shape lowerCAmelCase_ = x.reshape(_A , 4 , in_channel // 4 ) lowerCAmelCase_ = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(_A , _A ) return x def __UpperCamelCase ( _A ): lowerCAmelCase_ , lowerCAmelCase_ = x.shape lowerCAmelCase_ = x.reshape(_A , in_channel // 4 , 4 ) lowerCAmelCase_ = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(_A , _A ) return x def __UpperCamelCase ( _A ): lowerCAmelCase_ = x.shape[0] lowerCAmelCase_ = x.reshape(4 , in_channel // 4 ) lowerCAmelCase_ = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(_A ) return x def __UpperCamelCase ( _A ): lowerCAmelCase_ = x.shape[0] lowerCAmelCase_ = x.reshape(in_channel // 4 , 4 ) lowerCAmelCase_ = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(_A ) return x def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = { '''upernet-swin-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth''', '''upernet-swin-small''': 
'''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth''', '''upernet-swin-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth''', '''upernet-swin-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth''', } lowerCAmelCase_ = model_name_to_url[model_name] lowerCAmelCase_ = torch.hub.load_state_dict_from_url(_A , map_location='''cpu''' , file_name=_A )[ '''state_dict''' ] for name, param in state_dict.items(): print(_A , param.shape ) lowerCAmelCase_ = get_upernet_config(_A ) lowerCAmelCase_ = UperNetForSemanticSegmentation(_A ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): lowerCAmelCase_ = state_dict.pop(_A ) if "bn" in key: lowerCAmelCase_ = key.replace('''bn''' , '''batch_norm''' ) lowerCAmelCase_ = val # rename keys lowerCAmelCase_ = create_rename_keys(_A ) for src, dest in rename_keys: rename_key(_A , _A , _A ) read_in_q_k_v(_A , config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: lowerCAmelCase_ = reverse_correct_unfold_reduction_order(_A ) if "norm" in key: lowerCAmelCase_ = reverse_correct_unfold_norm_order(_A ) model.load_state_dict(_A ) # verify on image lowerCAmelCase_ = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg''' lowerCAmelCase_ = Image.open(requests.get(_A , stream=_A ).raw ).convert('''RGB''' ) lowerCAmelCase_ = SegformerImageProcessor() lowerCAmelCase_ = processor(_A , return_tensors='''pt''' ).pixel_values with torch.no_grad(): lowerCAmelCase_ = model(_A ) lowerCAmelCase_ = outputs.logits print(logits.shape ) print('''First values of logits:''' , logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": lowerCAmelCase_ = torch.tensor( [[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] ) elif model_name == "upernet-swin-small": lowerCAmelCase_ = torch.tensor( [[-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.0_9_0_8, -7.0_9_0_8, -6.8_5_3_4]] ) elif model_name == "upernet-swin-base": lowerCAmelCase_ = torch.tensor( [[-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.4_7_6_3, -6.4_7_6_3, -6.3_2_5_4]] ) elif model_name == "upernet-swin-large": lowerCAmelCase_ = torch.tensor( [[-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.4_0_4_4, -7.4_0_4_4, -7.2_5_8_6]] ) print('''Logits:''' , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , _A , atol=1E-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(f"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_A ) print(f"Saving processor to {pytorch_dump_folder_path}" ) processor.save_pretrained(_A ) if push_to_hub: print(f"Pushing model and processor for {model_name} to hub" ) model.push_to_hub(f"openmmlab/{model_name}" ) 
processor.push_to_hub(f"openmmlab/{model_name}" ) if __name__ == "__main__": _A = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''upernet-swin-tiny''', type=str, choices=[f"upernet-swin-{size}" for size in ['''tiny''', '''small''', '''base''', '''large''']], help='''Name of the Swin + UperNet model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) _A = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
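# --- Added invocation sketch (illustrative; the script filename and output
# path below are placeholders, not taken from the original) ---
#
#   python convert_swin_upernet.py \
#       --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny \
#       --push_to_hub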
import unittest from datasets import load_dataset from transformers.pipelines import pipeline from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow @is_pipeline_test @require_torch class A ( unittest.TestCase ): @require_torch def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = pipeline( task='''zero-shot-audio-classification''', model='''hf-internal-testing/tiny-clap-htsat-unfused''' ) lowerCAmelCase_ = load_dataset('''ashraq/esc50''' ) lowerCAmelCase_ = dataset['''train''']['''audio'''][-1]['''array'''] lowerCAmelCase_ = audio_classifier(UpperCamelCase__, candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] ) self.assertEqual( nested_simplify(UpperCamelCase__ ), [{'''score''': 0.501, '''label''': '''Sound of a dog'''}, {'''score''': 0.499, '''label''': '''Sound of vaccum cleaner'''}], ) @unittest.skip('''No models are available in TF''' ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" pass @slow @require_torch def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = pipeline( task='''zero-shot-audio-classification''', model='''laion/clap-htsat-unfused''', ) # This is an audio of a dog lowerCAmelCase_ = load_dataset('''ashraq/esc50''' ) lowerCAmelCase_ = dataset['''train''']['''audio'''][-1]['''array'''] lowerCAmelCase_ = audio_classifier(UpperCamelCase__, candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] ) self.assertEqual( nested_simplify(UpperCamelCase__ ), [ {'''score''': 0.999, '''label''': '''Sound of a dog'''}, {'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''}, ], ) lowerCAmelCase_ = audio_classifier([audio] * 5, candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] ) self.assertEqual( nested_simplify(UpperCamelCase__ ), [ [ {'''score''': 0.999, '''label''': '''Sound of a dog'''}, {'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''}, ], ] * 5, ) lowerCAmelCase_ = audio_classifier( [audio] * 5, candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''], batch_size=5 ) self.assertEqual( nested_simplify(UpperCamelCase__ ), [ [ {'''score''': 0.999, '''label''': '''Sound of a dog'''}, {'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''}, ], ] * 5, ) @unittest.skip('''No models are available in TF''' ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" pass
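# --- Added usage sketch (illustrative) ---
# The same pipeline as the slow test above, fed one second of random noise as a
# stand-in for a real clip (a raw array is assumed to be at the model's
# sampling rate). Expect near-uniform scores for noise.
import numpy as np
from transformers import pipeline

def _zero_shot_audio_demo() -> None:
    classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
    audio = np.random.randn(48000).astype(np.float32)
    print(classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"]))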
import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def __UpperCamelCase ( _A , _A ): lowerCAmelCase_ = args.log_outputs lowerCAmelCase_ = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] ) # load metric lowerCAmelCase_ = load_metric('''wer''' ) lowerCAmelCase_ = load_metric('''cer''' ) # compute metrics lowerCAmelCase_ = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] ) lowerCAmelCase_ = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] ) # print & log results lowerCAmelCase_ = f"WER: {wer_result}\nCER: {cer_result}" print(_A ) with open(f"{dataset_id}_eval_results.txt" , '''w''' ) as f: f.write(_A ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: lowerCAmelCase_ = f"log_{dataset_id}_predictions.txt" lowerCAmelCase_ = f"log_{dataset_id}_targets.txt" with open(_A , '''w''' ) as p, open(_A , '''w''' ) as t: # mapping function to write output def write_to_file(_A , _A ): p.write(f"{i}" + '''\n''' ) p.write(batch['''prediction'''] + '''\n''' ) t.write(f"{i}" + '''\n''' ) t.write(batch['''target'''] + '''\n''' ) result.map(_A , with_indices=_A ) def __UpperCamelCase ( _A ): lowerCAmelCase_ = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training lowerCAmelCase_ = re.sub(_A , '''''' , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! lowerCAmelCase_ = ['''\n\n''', '''\n''', ''' ''', ''' '''] for t in token_sequences_to_ignore: lowerCAmelCase_ = ''' '''.join(text.split(_A ) ) return text def __UpperCamelCase ( _A ): # load dataset lowerCAmelCase_ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=_A ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained(args.model_id ) lowerCAmelCase_ = feature_extractor.sampling_rate # resample audio lowerCAmelCase_ = dataset.cast_column('''audio''' , Audio(sampling_rate=_A ) ) # load eval pipeline if args.device is None: lowerCAmelCase_ = 0 if torch.cuda.is_available() else -1 lowerCAmelCase_ = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(_A ): lowerCAmelCase_ = asr( batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) lowerCAmelCase_ = prediction['''text'''] lowerCAmelCase_ = normalize_text(batch['''sentence'''] ) return batch # run inference on all examples lowerCAmelCase_ = dataset.map(_A , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(_A , _A ) if __name__ == "__main__": _A = argparse.ArgumentParser() parser.add_argument( '''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers''' ) parser.add_argument( '''--dataset''', type=str, required=True, help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''', ) parser.add_argument( '''--config''', type=str, required=True, help='''Config of the dataset. 
*E.g.* `\'en\'` for Common Voice''' ) parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''') parser.add_argument( '''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.''' ) parser.add_argument( '''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.''' ) parser.add_argument( '''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.''' ) parser.add_argument( '''--device''', type=int, default=None, help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''', ) _A = parser.parse_args() main(args)
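# --- Added invocation sketch (illustrative; the script filename, model id and
# dataset id are placeholders) ---
#
#   python eval.py \
#       --model_id my-org/wav2vec2-finetuned \
#       --dataset mozilla-foundation/common_voice_8_0 \
#       --config en \
#       --split test \
#       --log_outputs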
import argparse from argparse import Namespace import torch from torch import nn from transformers import XGLMConfig, XGLMForCausalLM def __UpperCamelCase ( _A ): lowerCAmelCase_ = [ '''decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(_A , _A ) def __UpperCamelCase ( _A ): lowerCAmelCase_ , lowerCAmelCase_ = emb.weight.shape lowerCAmelCase_ = nn.Linear(_A , _A , bias=_A ) lowerCAmelCase_ = emb.weight.data return lin_layer def __UpperCamelCase ( _A ): lowerCAmelCase_ = torch.load(_A , map_location='''cpu''' ) lowerCAmelCase_ = Namespace(**checkpoint['''cfg''']['''model'''] ) lowerCAmelCase_ = checkpoint['''model'''] remove_ignore_keys_(_A ) lowerCAmelCase_ = state_dict['''decoder.embed_tokens.weight'''].shape[0] lowerCAmelCase_ = {key.replace('''decoder''' , '''model''' ): val for key, val in state_dict.items()} lowerCAmelCase_ = XGLMConfig( vocab_size=_A , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''gelu''' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , ) lowerCAmelCase_ = XGLMForCausalLM(_A ) lowerCAmelCase_ = model.load_state_dict(_A , strict=_A ) print(_A ) lowerCAmelCase_ = make_linear_from_emb(model.model.embed_tokens ) return model if __name__ == "__main__": _A = argparse.ArgumentParser() # Required parameters parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''') parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') _A = parser.parse_args() _A = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
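# --- Added invocation sketch (illustrative; the script filename and paths are
# placeholders). Both arguments are positional, as defined by the argparse
# setup above.
#
#   python convert_xglm_checkpoint.py /path/to/fairseq/model.pt ./xglm-converted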
from typing import Dict, List, Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _A = logging.get_logger(__name__) _A = { '''nielsr/canine-s''': 2_048, } # Unicode defines 1,114,112 total “codepoints” _A = 1_114_112 # Below: Constants defining canonical codepoints for special, pseudo-characters. # Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py _A = 0 _A = 0xe0_00 _A = 0xe0_01 _A = 0xe0_02 _A = 0xe0_03 _A = 0xe0_04 # Maps special codepoints to human-readable names. _A = { # Special symbols are represented using codepoints values that are valid, # but designated as "Private Use", meaning that they will never be assigned # characters by the Unicode Consortium, and are thus safe for use here. # # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly # excluded and should fail with a hard error. CLS: "[CLS]", SEP: "[SEP]", BOS: "[BOS]", MASK: "[MASK]", PAD: "[PAD]", RESERVED: "[RESERVED]", } # Maps special codepoint human-readable names to their codepoint values. _A = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()} class A ( __UpperCAmelCase ): __snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self, UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=False, UpperCamelCase__=2048, **UpperCamelCase__, ): """simple docstring""" lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else bos_token lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else eos_token lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else sep_token lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else cls_token lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else pad_token # Mask token behave like a normal word, i.e. include the space before it lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else mask_token super().__init__( bos_token=UpperCamelCase__, eos_token=UpperCamelCase__, sep_token=UpperCamelCase__, cls_token=UpperCamelCase__, pad_token=UpperCamelCase__, mask_token=UpperCamelCase__, add_prefix_space=UpperCamelCase__, model_max_length=UpperCamelCase__, **UpperCamelCase__, ) # Creates a mapping for looking up the IDs of special symbols. lowerCAmelCase_ = {} for codepoint, name in SPECIAL_CODEPOINTS.items(): lowerCAmelCase_ = codepoint # Creates a mapping for looking up the string forms of special symbol IDs. 
lowerCAmelCase_ = { codepoint: name for name, codepoint in self._special_codepoints.items() } lowerCAmelCase_ = UNICODE_VOCAB_SIZE lowerCAmelCase_ = len(self._special_codepoints ) @property def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" return self._unicode_vocab_size def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ): """simple docstring""" return list(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ): """simple docstring""" try: return ord(UpperCamelCase__ ) except TypeError: raise ValueError(f"invalid token: '{token}'" ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ): """simple docstring""" try: if index in SPECIAL_CODEPOINTS: return SPECIAL_CODEPOINTS[index] return chr(UpperCamelCase__ ) except TypeError: raise ValueError(f"invalid id: {index}" ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ): """simple docstring""" return "".join(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None ): """simple docstring""" lowerCAmelCase_ = [self.sep_token_id] lowerCAmelCase_ = [self.cls_token_id] lowerCAmelCase_ = cls + token_ids_a + sep if token_ids_a is not None: result += token_ids_a + sep return result def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None, UpperCamelCase__ = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase__, token_ids_a=UpperCamelCase__, already_has_special_tokens=UpperCamelCase__ ) lowerCAmelCase_ = [1] + ([0] * len(UpperCamelCase__ )) + [1] if token_ids_a is not None: result += ([0] * len(UpperCamelCase__ )) + [1] return result def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None ): """simple docstring""" lowerCAmelCase_ = [self.sep_token_id] lowerCAmelCase_ = [self.cls_token_id] lowerCAmelCase_ = len(cls + token_ids_a + sep ) * [0] if token_ids_a is not None: result += len(token_ids_a + sep ) * [1] return result def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None ): """simple docstring""" return ()
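# --- Added usage sketch (illustrative; uses the released CanineTokenizer,
# which is the class implemented above) ---
# CANINE has no subword vocabulary: a character's id is simply its Unicode
# codepoint, framed by the private-use codepoints 0xE000 ([CLS]) and
# 0xE001 ([SEP]).
from transformers import CanineTokenizer

def _canine_demo() -> None:
    tokenizer = CanineTokenizer()  # no vocab file needed
    assert tokenizer("hi")["input_ids"] == [0xE000, ord("h"), ord("i"), 0xE001]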
from ...utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_torch_available,
    is_transformers_available,
)

try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline


if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
def solution(n: int = 1000000) -> int:
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, n):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter

    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
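# --- Added sanity check (illustrative) ---
# 27 famously needs 111 steps to reach 1, far more than any smaller start, so
# among the starting values 2..27 it must produce the longest chain.
def _collatz_demo() -> None:
    assert solution(28) == 27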
from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class A ( __UpperCAmelCase ): __snake_case = ['image_processor', 'tokenizer'] __snake_case = 'BlipImageProcessor' __snake_case = 'AutoTokenizer' def __init__( self, UpperCamelCase__, UpperCamelCase__ ): """simple docstring""" lowerCAmelCase_ = False super().__init__(UpperCamelCase__, UpperCamelCase__ ) lowerCAmelCase_ = self.image_processor def __call__( self, UpperCamelCase__ = None, UpperCamelCase__ = None, UpperCamelCase__ = True, UpperCamelCase__ = False, UpperCamelCase__ = None, UpperCamelCase__ = None, UpperCamelCase__ = 0, UpperCamelCase__ = None, UpperCamelCase__ = None, UpperCamelCase__ = False, UpperCamelCase__ = False, UpperCamelCase__ = False, UpperCamelCase__ = False, UpperCamelCase__ = False, UpperCamelCase__ = True, UpperCamelCase__ = None, **UpperCamelCase__, ): """simple docstring""" if images is None and text is None: raise ValueError('''You have to specify either images or text.''' ) # Get only text if images is None: lowerCAmelCase_ = self.tokenizer lowerCAmelCase_ = self.tokenizer( text=UpperCamelCase__, add_special_tokens=UpperCamelCase__, padding=UpperCamelCase__, truncation=UpperCamelCase__, max_length=UpperCamelCase__, stride=UpperCamelCase__, pad_to_multiple_of=UpperCamelCase__, return_attention_mask=UpperCamelCase__, return_overflowing_tokens=UpperCamelCase__, return_special_tokens_mask=UpperCamelCase__, return_offsets_mapping=UpperCamelCase__, return_token_type_ids=UpperCamelCase__, return_length=UpperCamelCase__, verbose=UpperCamelCase__, return_tensors=UpperCamelCase__, **UpperCamelCase__, ) return text_encoding # add pixel_values lowerCAmelCase_ = self.image_processor(UpperCamelCase__, return_tensors=UpperCamelCase__ ) if text is not None: lowerCAmelCase_ = self.tokenizer( text=UpperCamelCase__, add_special_tokens=UpperCamelCase__, padding=UpperCamelCase__, truncation=UpperCamelCase__, max_length=UpperCamelCase__, stride=UpperCamelCase__, pad_to_multiple_of=UpperCamelCase__, return_attention_mask=UpperCamelCase__, return_overflowing_tokens=UpperCamelCase__, return_special_tokens_mask=UpperCamelCase__, return_offsets_mapping=UpperCamelCase__, return_token_type_ids=UpperCamelCase__, return_length=UpperCamelCase__, verbose=UpperCamelCase__, return_tensors=UpperCamelCase__, **UpperCamelCase__, ) else: lowerCAmelCase_ = None if text_encoding is not None: encoding_image_processor.update(UpperCamelCase__ ) return encoding_image_processor def SCREAMING_SNAKE_CASE__ ( self, *UpperCamelCase__, **UpperCamelCase__ ): """simple docstring""" return self.tokenizer.batch_decode(*UpperCamelCase__, **UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self, *UpperCamelCase__, **UpperCamelCase__ ): """simple docstring""" return self.tokenizer.decode(*UpperCamelCase__, **UpperCamelCase__ ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.tokenizer.model_input_names lowerCAmelCase_ = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
import unittest from transformers import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device if is_torch_available(): import torch from transformers import AutoModelForImageClassification if is_vision_available(): from transformers import AutoImageProcessor @require_torch @require_vision class A ( unittest.TestCase ): @slow def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = AutoImageProcessor.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' ) lowerCAmelCase_ = AutoModelForImageClassification.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' ) model.to(UpperCamelCase__ ) from datasets import load_dataset lowerCAmelCase_ = load_dataset('''nielsr/rvlcdip-demo''' ) lowerCAmelCase_ = dataset['''train'''][0]['''image'''].convert('''RGB''' ) lowerCAmelCase_ = image_processor(UpperCamelCase__, return_tensors='''pt''' ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): lowerCAmelCase_ = model(**UpperCamelCase__ ) lowerCAmelCase_ = outputs.logits lowerCAmelCase_ = torch.Size((1, 16) ) self.assertEqual(logits.shape, UpperCamelCase__ ) lowerCAmelCase_ = torch.tensor( [-0.4_158, -0.4_092, -0.4_347], device=UpperCamelCase__, dtype=torch.float, ) self.assertTrue(torch.allclose(logits[0, :3], UpperCamelCase__, atol=1E-4 ) )
import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): _A = '''pt''' elif is_tf_available(): _A = '''tf''' else: _A = '''jax''' class A ( __UpperCAmelCase , unittest.TestCase ): __snake_case = PerceiverTokenizer __snake_case = False def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" super().setUp() lowerCAmelCase_ = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' ) def SCREAMING_SNAKE_CASE__ ( self, **UpperCamelCase__ ): """simple docstring""" return self.tokenizer_class.from_pretrained(self.tmpdirname, **UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__=False, UpperCamelCase__=20, UpperCamelCase__=5 ): """simple docstring""" lowerCAmelCase_ = [] for i in range(len(UpperCamelCase__ ) ): try: lowerCAmelCase_ = tokenizer.decode([i], clean_up_tokenization_spaces=UpperCamelCase__ ) except UnicodeDecodeError: pass toks.append((i, tok) ) lowerCAmelCase_ = list(filter(lambda UpperCamelCase__ : re.match(R'''^[ a-zA-Z]+$''', t[1] ), UpperCamelCase__ ) ) lowerCAmelCase_ = list(filter(lambda UpperCamelCase__ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=UpperCamelCase__ ), UpperCamelCase__ ) ) if max_length is not None and len(UpperCamelCase__ ) > max_length: lowerCAmelCase_ = toks[:max_length] if min_length is not None and len(UpperCamelCase__ ) < min_length and len(UpperCamelCase__ ) > 0: while len(UpperCamelCase__ ) < min_length: lowerCAmelCase_ = toks + toks # toks_str = [t[1] for t in toks] lowerCAmelCase_ = [t[0] for t in toks] # Ensure consistency lowerCAmelCase_ = tokenizer.decode(UpperCamelCase__, clean_up_tokenization_spaces=UpperCamelCase__ ) if " " not in output_txt and len(UpperCamelCase__ ) > 1: lowerCAmelCase_ = ( tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=UpperCamelCase__ ) + ''' ''' + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=UpperCamelCase__ ) ) if with_prefix_space: lowerCAmelCase_ = ''' ''' + output_txt lowerCAmelCase_ = tokenizer.encode(UpperCamelCase__, add_special_tokens=UpperCamelCase__ ) return output_txt, output_ids def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.perceiver_tokenizer lowerCAmelCase_ = '''Unicode €.''' lowerCAmelCase_ = tokenizer(UpperCamelCase__ ) lowerCAmelCase_ = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5] self.assertEqual(encoded['''input_ids'''], UpperCamelCase__ ) # decoding lowerCAmelCase_ = tokenizer.decode(UpperCamelCase__ ) self.assertEqual(UpperCamelCase__, '''[CLS]Unicode €.[SEP]''' ) lowerCAmelCase_ = tokenizer('''e è é ê ë''' ) lowerCAmelCase_ = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5] self.assertEqual(encoded['''input_ids'''], UpperCamelCase__ ) # decoding lowerCAmelCase_ = tokenizer.decode(UpperCamelCase__ ) self.assertEqual(UpperCamelCase__, '''[CLS]e è é ê ë[SEP]''' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ), '''[CLS]e è é ê ë[SEP]''' ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = 
self.perceiver_tokenizer lowerCAmelCase_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] # fmt: off lowerCAmelCase_ = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0] # fmt: on lowerCAmelCase_ = tokenizer(UpperCamelCase__, padding=UpperCamelCase__, return_tensors=UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__, UpperCamelCase__ ) if FRAMEWORK != "jax": lowerCAmelCase_ = list(batch.input_ids.numpy()[0] ) else: lowerCAmelCase_ = list(batch.input_ids.tolist()[0] ) self.assertListEqual(UpperCamelCase__, UpperCamelCase__ ) self.assertEqual((2, 38), batch.input_ids.shape ) self.assertEqual((2, 38), batch.attention_mask.shape ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.perceiver_tokenizer lowerCAmelCase_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] lowerCAmelCase_ = tokenizer(UpperCamelCase__, padding=UpperCamelCase__, return_tensors=UpperCamelCase__ ) # check if input_ids are returned and no decoder_input_ids self.assertIn('''input_ids''', UpperCamelCase__ ) self.assertIn('''attention_mask''', UpperCamelCase__ ) self.assertNotIn('''decoder_input_ids''', UpperCamelCase__ ) self.assertNotIn('''decoder_attention_mask''', UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.perceiver_tokenizer lowerCAmelCase_ = [ '''Summary of the text.''', '''Another summary.''', ] lowerCAmelCase_ = tokenizer( text_target=UpperCamelCase__, max_length=32, padding='''max_length''', truncation=UpperCamelCase__, return_tensors=UpperCamelCase__ ) self.assertEqual(32, targets['''input_ids'''].shape[1] ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): self.assertNotEqual(tokenizer.model_max_length, 42 ) # Now let's start the test lowerCAmelCase_ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc lowerCAmelCase_ = tempfile.mkdtemp() lowerCAmelCase_ = ''' He is very happy, UNwant\u00E9d,running''' lowerCAmelCase_ = tokenizer.encode(UpperCamelCase__, add_special_tokens=UpperCamelCase__ ) tokenizer.save_pretrained(UpperCamelCase__ ) lowerCAmelCase_ = tokenizer.__class__.from_pretrained(UpperCamelCase__ ) lowerCAmelCase_ = after_tokenizer.encode(UpperCamelCase__, add_special_tokens=UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__, UpperCamelCase__ ) shutil.rmtree(UpperCamelCase__ ) lowerCAmelCase_ = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc lowerCAmelCase_ = tempfile.mkdtemp() lowerCAmelCase_ = ''' He is very happy, UNwant\u00E9d,running''' tokenizer.add_tokens(['''bim''', '''bambam'''] ) lowerCAmelCase_ = tokenizer.additional_special_tokens additional_special_tokens.append('''new_additional_special_token''' ) tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} ) lowerCAmelCase_ = tokenizer.encode(UpperCamelCase__, add_special_tokens=UpperCamelCase__ ) tokenizer.save_pretrained(UpperCamelCase__ ) lowerCAmelCase_ = 
tokenizer.__class__.from_pretrained(UpperCamelCase__ ) lowerCAmelCase_ = after_tokenizer.encode(UpperCamelCase__, add_special_tokens=UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__, UpperCamelCase__ ) self.assertIn('''new_additional_special_token''', after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length, 42 ) lowerCAmelCase_ = tokenizer.__class__.from_pretrained(UpperCamelCase__, model_max_length=43 ) self.assertEqual(tokenizer.model_max_length, 43 ) shutil.rmtree(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(UpperCamelCase__ ) with open(os.path.join(UpperCamelCase__, '''special_tokens_map.json''' ), encoding='''utf-8''' ) as json_file: lowerCAmelCase_ = json.load(UpperCamelCase__ ) with open(os.path.join(UpperCamelCase__, '''tokenizer_config.json''' ), encoding='''utf-8''' ) as json_file: lowerCAmelCase_ = json.load(UpperCamelCase__ ) lowerCAmelCase_ = [f"<extra_id_{i}>" for i in range(125 )] lowerCAmelCase_ = added_tokens_extra_ids + [ '''an_additional_special_token''' ] lowerCAmelCase_ = added_tokens_extra_ids + [ '''an_additional_special_token''' ] with open(os.path.join(UpperCamelCase__, '''special_tokens_map.json''' ), '''w''', encoding='''utf-8''' ) as outfile: json.dump(UpperCamelCase__, UpperCamelCase__ ) with open(os.path.join(UpperCamelCase__, '''tokenizer_config.json''' ), '''w''', encoding='''utf-8''' ) as outfile: json.dump(UpperCamelCase__, UpperCamelCase__ ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files lowerCAmelCase_ = tokenizer_class.from_pretrained( UpperCamelCase__, ) self.assertIn( '''an_additional_special_token''', tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ['''an_additional_special_token'''], tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ), ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained lowerCAmelCase_ = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''', lstrip=UpperCamelCase__ )] lowerCAmelCase_ = tokenizer_class.from_pretrained( UpperCamelCase__, additional_special_tokens=UpperCamelCase__, ) self.assertIn('''a_new_additional_special_token''', tokenizer.additional_special_tokens ) self.assertEqual( ['''a_new_additional_special_token'''], tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ), ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([178] ), '''�''' ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.get_tokenizers(fast=UpperCamelCase__, do_lower_case=UpperCamelCase__ ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): lowerCAmelCase_ = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]'''] lowerCAmelCase_ = tokenizer.convert_tokens_to_string(UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__, UpperCamelCase__ )
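# --- Added usage sketch (illustrative) ---
# The Perceiver tokenizer is byte-level with six special tokens in front, so a
# plain ASCII character's id is its byte value plus 6 — which is why 'U' shows
# up as 91 (85 + 6) in the 'Unicode €.' test above.
from transformers import PerceiverTokenizer

def _perceiver_offset_demo() -> None:
    tokenizer = PerceiverTokenizer()
    assert tokenizer("U", add_special_tokens=False)["input_ids"] == [ord("U") + 6]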
import argparse
import os
import re

import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints

from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)


def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params


def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}

    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict


def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        processor.image_processor.max_patches = 4096

    processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Whether the checkpoint is a VQA model.")

    args = parser.parse_args()
    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large, args.is_vqa
    )
278
1
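To see what the renaming pass in the conversion script above actually does, here is a toy run on a hand-made nested dict standing in for a real T5X checkpoint. The parameter names are invented for illustration; only one `CONVERSION_MAPPING` entry and the layer-number regex are exercised:

# Toy illustration of the renaming pass: `flatten_dict` turns nesting into
# tuple keys; the loop joins them and applies the string/regex rewrites.
import re

from flax.traverse_util import flatten_dict

params = {"target": {"encoder": {"layers_0": {"attention": {"query": {"kernel": [[0.0]]}}}}}}
flat = flatten_dict(params)
for key in flat:
    new_key = ".".join(key[1:])                    # drop the "target" prefix
    new_key = new_key.replace("kernel", "weight")  # one entry of CONVERSION_MAPPING
    new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
    new_key = new_key.replace("encoder", "encoder.encoder")
    print(new_key)  # encoder.encoder.layer.0.attention.query.weight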
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
# expected "next greatest element" for each position of `arr`
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    # Brute force: for each element, scan the remainder of the array for the
    # first strictly greater value. O(n^2) time.
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    # Same brute force written with enumerate/slicing; still O(n^2).
    result = []
    for i, outer in enumerate(arr):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    # Monotonic stack: traverse from the right, popping stack values that are
    # <= the current element; the remaining top (if any) is the next greater
    # element. Each value is pushed and popped at most once, hence O(n).
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "     next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
278
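A worked trace of the stack-based `next_greatest_element` above on a small hypothetical input makes the O(n) argument concrete (assumes the functions from the sample are in scope):

# Trace on [2, 7, 3, 5, 4, 6, 8], scanning right to left; each element pops
# everything <= itself before reading the stack top:
#   index 6 (8): stack []      -> result -1, push 8
#   index 5 (6): stack [8]     -> result  8, push 6
#   index 4 (4): stack [8, 6]  -> result  6, push 4
#   index 3 (5): pops 4        -> result  6, push 5
#   index 1 (7): pops 3, 5, 6  -> result  8, push 7
# Every element is pushed once and popped at most once, hence O(n).
assert next_greatest_element([2, 7, 3, 5, 4, 6, 8]) == [7, 8, 5, 6, 6, 8, -1]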
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
278
1
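The file above is the standard deprecation-shim pattern: the old class becomes a subclass of its replacement and emits a `FutureWarning` on construction. A self-contained sketch of the same pattern with invented class names, runnable without transformers:

import warnings


class NewProcessor:
    # stand-in for the replacement class
    def __init__(self, scale: float = 1.0) -> None:
        self.scale = scale


class OldExtractor(NewProcessor):
    # deprecated alias: warn, then defer entirely to the replacement
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn("OldExtractor is deprecated; use NewProcessor instead.", FutureWarning)
        super().__init__(*args, **kwargs)


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    OldExtractor(scale=2.0)
assert issubclass(caught[0].category, FutureWarning)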
import math


def proth(number: int) -> int:
    # Returns the `number`-th Proth number (3, 5, 9, 13, 17, 25, ...).
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # The list is grown block by block: block n appends `increment`
        # new values of the form 2^(n+1) + previous Proth number.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

    return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue
        print(f"The {number}th Proth number: {value}")
278
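For reference, `proth(number)` above enumerates Proth numbers, i.e. integers of the form k · 2^n + 1 with k odd and k < 2^n (OEIS A080075). A quick check of the first ten values, assuming `proth` from the sample above is in scope:

expected = [3, 5, 9, 13, 17, 25, 33, 41, 49, 57]
assert [proth(i) for i in range(1, 11)] == expected
# e.g. 13 = 3 * 2**2 + 1, with k = 3 odd and 3 < 2**2 = 4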
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_yolos_config(yolos_name):
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def read_in_q_k_v(state_dict, config, base_model=False):
    # Note: the split actually used by this script happens in convert_state_dict()
    # below; this helper mirrors it for raw timm-style keys.
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(name):
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--yolos_name",
        default="yolos_s_200_pre",
        type=str,
        help=(
            "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
            " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
278
1
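The qkv handling in `convert_state_dict` above is just tensor slicing: timm fuses query, key and value into one (3·dim, dim) matrix, and the converter cuts it back into three (dim, dim) blocks. A toy demonstration with a dummy tensor (shapes illustrative, not YOLOS's real ones):

import torch

dim = 4
qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
# same slices as the conversion above: rows [0, dim), [dim, 2*dim), [2*dim, 3*dim)
query, key, value = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
assert query.shape == key.shape == value.shape == (dim, dim)
assert torch.equal(torch.cat([query, key, value], dim=0), qkv)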