code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
"""simple docstring""" from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker __a = """CompVis/stable-diffusion-v1-1""" __a = """CompVis/stable-diffusion-v1-2""" __a = """CompVis/stable-diffusion-v1-3""" __a = """CompVis/stable-diffusion-v1-4""" class lowerCamelCase ( lowerCamelCase__ ): '''simple docstring''' def __init__( self: Union[str, Any] , snake_case: Dict , snake_case: int , snake_case: Optional[int] , snake_case: List[str] , snake_case: List[str] , snake_case: str , snake_case: List[Any] , snake_case: Tuple = True , ) -> Dict: super()._init_() snake_case_ :int = StableDiffusionPipeline.from_pretrained(snake_case ) snake_case_ :List[Any] = StableDiffusionPipeline.from_pretrained(snake_case ) snake_case_ :int = StableDiffusionPipeline.from_pretrained(snake_case ) snake_case_ :Tuple = StableDiffusionPipeline( vae=snake_case , text_encoder=snake_case , tokenizer=snake_case , unet=snake_case , scheduler=snake_case , safety_checker=snake_case , feature_extractor=snake_case , requires_safety_checker=snake_case , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea ) @property def lowerCAmelCase_ ( self: Any ) -> Optional[Any]: return {k: getattr(self , snake_case ) for k in self.config.keys() if not k.startswith("""_""" )} def lowerCAmelCase_ ( self: List[str] , snake_case: List[Any] = "auto" ) -> str: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory snake_case_ :Any = self.unet.config.attention_head_dim // 2 
self.unet.set_attention_slice(snake_case ) def lowerCAmelCase_ ( self: Tuple ) -> Any: self.enable_attention_slicing(snake_case ) @torch.no_grad() def lowerCAmelCase_ ( self: Any , snake_case: Any , snake_case: str = 512 , snake_case: str = 512 , snake_case: Tuple = 50 , snake_case: Optional[Any] = 7.5 , snake_case: Tuple = None , snake_case: Dict = 1 , snake_case: List[str] = 0.0 , snake_case: Optional[int] = None , snake_case: List[Any] = None , snake_case: Tuple = "pil" , snake_case: Union[str, Any] = True , snake_case: Optional[int] = None , snake_case: str = 1 , **snake_case: str , ) -> Optional[int]: return self.pipea( prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , ) @torch.no_grad() def lowerCAmelCase_ ( self: Optional[Any] , snake_case: str , snake_case: str = 512 , snake_case: List[str] = 512 , snake_case: Any = 50 , snake_case: str = 7.5 , snake_case: Optional[Any] = None , snake_case: Any = 1 , snake_case: List[Any] = 0.0 , snake_case: Tuple = None , snake_case: Optional[int] = None , snake_case: List[Any] = "pil" , snake_case: Any = True , snake_case: Any = None , snake_case: str = 1 , **snake_case: int , ) -> Optional[Any]: return self.pipea( prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , ) @torch.no_grad() def lowerCAmelCase_ ( self: Tuple , snake_case: Dict , snake_case: Optional[Any] = 512 , snake_case: Optional[Any] = 512 , 
snake_case: Dict = 50 , snake_case: str = 7.5 , snake_case: List[Any] = None , snake_case: Dict = 1 , snake_case: str = 0.0 , snake_case: Tuple = None , snake_case: Dict = None , snake_case: Dict = "pil" , snake_case: Dict = True , snake_case: Union[str, Any] = None , snake_case: List[Any] = 1 , **snake_case: Optional[Any] , ) -> Union[str, Any]: return self.pipea( prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , ) @torch.no_grad() def lowerCAmelCase_ ( self: Optional[Any] , snake_case: Optional[Any] , snake_case: List[str] = 512 , snake_case: int = 512 , snake_case: str = 50 , snake_case: Dict = 7.5 , snake_case: str = None , snake_case: List[str] = 1 , snake_case: List[Any] = 0.0 , snake_case: int = None , snake_case: Union[str, Any] = None , snake_case: str = "pil" , snake_case: Optional[int] = True , snake_case: List[str] = None , snake_case: Optional[int] = 1 , **snake_case: List[str] , ) -> str: return self.pipea( prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , ) @torch.no_grad() def lowerCAmelCase_ ( self: List[str] , snake_case: Optional[int] , snake_case: Any = 512 , snake_case: Optional[Any] = 512 , snake_case: Optional[Any] = 50 , snake_case: Optional[int] = 7.5 , snake_case: str = None , snake_case: List[Any] = 1 , snake_case: Dict = 0.0 , snake_case: Tuple = None , snake_case: int = None , snake_case: Dict = "pil" , snake_case: Any = True , 
snake_case: List[str] = None , snake_case: List[Any] = 1 , **snake_case: Optional[Any] , ) -> str: snake_case_ :Optional[int] = """cuda""" if torch.cuda.is_available() else """cpu""" self.to(snake_case ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(f"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" ) # Get first result from Stable Diffusion Checkpoint v1.1 snake_case_ :Any = self.textaimg_sda_a( prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , ) # Get first result from Stable Diffusion Checkpoint v1.2 snake_case_ :Optional[int] = self.textaimg_sda_a( prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , ) # Get first result from Stable Diffusion Checkpoint v1.3 snake_case_ :Dict = self.textaimg_sda_a( prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , ) # Get first result from Stable Diffusion Checkpoint v1.4 snake_case_ :Dict = self.textaimg_sda_a( prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , 
guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
66
'''simple docstring''' import os import unittest from transformers import FunnelTokenizer, FunnelTokenizerFast from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ): lowercase : Union[str, Any] =FunnelTokenizer lowercase : List[str] =FunnelTokenizerFast lowercase : Union[str, Any] =True lowercase : int =True def lowercase__ ( self ): """simple docstring""" super().setUp() lowerCamelCase_ =[ '''<unk>''', '''<cls>''', '''<sep>''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] lowerCamelCase_ =os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def lowercase__ ( self, **lowerCAmelCase ): """simple docstring""" return FunnelTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase ) def lowercase__ ( self, **lowerCAmelCase ): """simple docstring""" return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ ='''UNwant\u00E9d,running''' lowerCamelCase_ ='''unwanted, running''' return input_text, output_text def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.tokenizer_class(self.vocab_file ) lowerCamelCase_ =tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(lowerCAmelCase, ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ), [7, 4, 5, 10, 8, 9] ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_tokenizers(do_lower_case=lowerCAmelCase ) for tokenizer in 
tokenizers: lowerCamelCase_ =tokenizer('''UNwant\u00E9d,running''' ) lowerCamelCase_ =len(inputs['''input_ids'''] ) - 1 self.assertListEqual(inputs['''token_type_ids'''], [2] + [0] * sentence_len ) lowerCamelCase_ =tokenizer('''UNwant\u00E9d,running''', '''UNwant\u00E9d,running''' ) self.assertListEqual(inputs['''token_type_ids'''], [2] + [0] * sentence_len + [1] * sentence_len )
75
0
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings UpperCAmelCase = R""" [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: title_sep (`str`, *optional*, defaults to `\" / \"`): Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`]. doc_sep (`str`, *optional*, defaults to `\" // \"`): Separator inserted between the text of the retrieved document and the original input when calling [`RagRetriever`]. n_docs (`int`, *optional*, defaults to 5): Number of documents to retrieve. max_combined_length (`int`, *optional*, defaults to 300): Max length of contextualized input returned by [`~RagRetriever.__call__`]. retrieval_vector_size (`int`, *optional*, defaults to 768): Dimensionality of the document embeddings indexed by [`RagRetriever`]. retrieval_batch_size (`int`, *optional*, defaults to 8): Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated [`RagRetriever`]. dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`): A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids using `datasets.list_datasets()`). dataset_split (`str`, *optional*, defaults to `\"train\"`) Which split of the `dataset` to load. index_name (`str`, *optional*, defaults to `\"compressed\"`) The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and `\"compressed\"`. index_path (`str`, *optional*) The path to the serialized faiss index on disk. passages_path (`str`, *optional*): A path to text passages compatible with the faiss index. 
Required if using [`~models.rag.retrieval_rag.LegacyIndex`] use_dummy_dataset (`bool`, *optional*, defaults to `False`) Whether to load a \"dummy\" variant of the dataset specified by `dataset`. label_smoothing (`float`, *optional*, defaults to 0.0): Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing in the loss calculation. If set to 0, no label smoothing is performed. do_marginalize (`bool`, *optional*, defaults to `False`): If `True`, the logits are marginalized over all documents by making use of `torch.nn.functional.log_softmax`. reduce_loss (`bool`, *optional*, defaults to `False`): Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation. do_deduplication (`bool`, *optional*, defaults to `True`): Whether or not to deduplicate the generations from different context documents for a given input. Has to be set to `False` if used while training with distributed backend. exclude_bos_score (`bool`, *optional*, defaults to `False`): Whether or not to disregard the BOS token when computing the loss. output_retrieved(`bool`, *optional*, defaults to `False`): If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and `context_attention_mask` are returned. See returned tensors for more detail. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). forced_eos_token_id (`int`, *optional*): The id of the token to force as the last generated token when `max_length` is reached. Usually set to `eos_token_id`. 
""" @add_start_docstrings(lowerCamelCase__ ) class lowerCAmelCase ( lowerCamelCase__ ): lowerCAmelCase_ = 'rag' lowerCAmelCase_ = True def __init__( self : Optional[Any] , __lowercase : Optional[Any]=None , __lowercase : str=True , __lowercase : str=None , __lowercase : Any=None , __lowercase : Optional[Any]=None , __lowercase : str=None , __lowercase : Optional[Any]=None , __lowercase : str=" / " , __lowercase : Any=" // " , __lowercase : int=5 , __lowercase : List[str]=300 , __lowercase : List[Any]=768 , __lowercase : List[Any]=8 , __lowercase : Union[str, Any]="wiki_dpr" , __lowercase : List[str]="train" , __lowercase : str="compressed" , __lowercase : List[Any]=None , __lowercase : Optional[int]=None , __lowercase : Optional[Any]=False , __lowercase : Tuple=False , __lowercase : Any=0.0 , __lowercase : Optional[int]=True , __lowercase : str=False , __lowercase : Union[str, Any]=False , __lowercase : List[str]=False , __lowercase : str=True , __lowercase : List[Any]=None , **__lowercase : Dict , ): """simple docstring""" super().__init__( bos_token_id=__lowercase , pad_token_id=__lowercase , eos_token_id=__lowercase , decoder_start_token_id=__lowercase , forced_eos_token_id=__lowercase , is_encoder_decoder=__lowercase , prefix=__lowercase , vocab_size=__lowercase , **__lowercase , ) assert ( "question_encoder" in kwargs and "generator" in kwargs ), "Config has to be initialized with question_encoder and generator config" __lowercase =kwargs.pop('question_encoder' ) __lowercase =question_encoder_config.pop('model_type' ) __lowercase =kwargs.pop('generator' ) __lowercase =decoder_config.pop('model_type' ) from ..auto.configuration_auto import AutoConfig __lowercase =AutoConfig.for_model(__lowercase , **__lowercase ) __lowercase =AutoConfig.for_model(__lowercase , **__lowercase ) __lowercase =reduce_loss __lowercase =label_smoothing __lowercase =exclude_bos_score __lowercase =do_marginalize __lowercase =title_sep __lowercase =doc_sep __lowercase =n_docs __lowercase 
=max_combined_length __lowercase =dataset __lowercase =dataset_split __lowercase =index_name __lowercase =retrieval_vector_size __lowercase =retrieval_batch_size __lowercase =passages_path __lowercase =index_path __lowercase =use_dummy_dataset __lowercase =output_retrieved __lowercase =do_deduplication __lowercase =use_cache if self.forced_eos_token_id is None: __lowercase =getattr(self.generator , 'forced_eos_token_id' , __lowercase ) @classmethod def snake_case ( cls : Union[str, Any] , __lowercase : Dict , __lowercase : Dict , **__lowercase : Tuple ): """simple docstring""" return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **__lowercase ) def snake_case ( self : str ): """simple docstring""" __lowercase =copy.deepcopy(self.__dict__ ) __lowercase =self.question_encoder.to_dict() __lowercase =self.generator.to_dict() __lowercase =self.__class__.model_type return output
141
'''simple docstring''' import argparse import os import re import torch from flax.traverse_util import flatten_dict from tax import checkpoints from transformers import ( AutoTokenizer, PixaStructConfig, PixaStructForConditionalGeneration, PixaStructImageProcessor, PixaStructProcessor, PixaStructTextConfig, PixaStructVisionConfig, ) def a_ ( __snake_case : Any ) -> int: """simple docstring""" lowerCamelCase_ =checkpoints.load_tax_checkpoint(__snake_case ) lowerCamelCase_ =flatten_dict(__snake_case ) return flax_params def a_ ( __snake_case : Dict ) -> Optional[int]: """simple docstring""" lowerCamelCase_ ={} lowerCamelCase_ ={ '''token_embedder''': '''embeddings''', '''encoder_norm''': '''layernorm''', '''kernel''': '''weight''', '''.out''': '''.output''', '''scale''': '''weight''', '''embedders_0.pos_embedding''': '''row_embedder.weight''', '''embedders_1.pos_embedding''': '''column_embedder.weight''', } lowerCamelCase_ ={ '''query''': '''attention.query''', '''key''': '''attention.key''', '''value''': '''attention.value''', '''output.dense''': '''output''', '''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''', '''pre_self_attention_layer_norm''': '''self_attention.layer_norm''', '''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''', '''mlp.''': '''mlp.DenseReluDense.''', '''pre_mlp_layer_norm''': '''mlp.layer_norm''', '''self_attention.o''': '''self_attention.attention.o''', '''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''', '''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''', '''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''', '''decoder.logits_dense.weight''': '''decoder.lm_head.weight''', } for key in flax_dict.keys(): if "target" in key: # remove the first prefix from the key lowerCamelCase_ ='''.'''.join(key[1:] ) # rename the key for old, new in CONVERSION_MAPPING.items(): lowerCamelCase_ 
=new_key.replace(__snake_case , __snake_case ) if "decoder" in new_key: for old, new in DECODER_CONVERSION_MAPPING.items(): lowerCamelCase_ =new_key.replace(__snake_case , __snake_case ) if "layers" in new_key and "decoder" not in new_key: # use regex to replace the layer number lowerCamelCase_ =re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , __snake_case ) lowerCamelCase_ =new_key.replace('''encoder''' , '''encoder.encoder''' ) elif "layers" in new_key and "decoder" in new_key: # use regex to replace the layer number lowerCamelCase_ =re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , __snake_case ) lowerCamelCase_ =flax_dict[key] lowerCamelCase_ ={} # convert converted_dict into torch format for key in converted_dict.keys(): if ("embed_tokens" not in key) and ("embedder" not in key): lowerCamelCase_ =torch.from_numpy(converted_dict[key].T ) else: lowerCamelCase_ =torch.from_numpy(converted_dict[key] ) return converted_torch_dict def a_ ( __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : Any=False , __snake_case : Optional[int]=False ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ =get_flax_param(__snake_case ) if not use_large: lowerCamelCase_ =PixaStructVisionConfig() lowerCamelCase_ =PixaStructTextConfig() else: lowerCamelCase_ =PixaStructVisionConfig( hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 ) lowerCamelCase_ =PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 ) lowerCamelCase_ =PixaStructConfig( vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=__snake_case ) lowerCamelCase_ =PixaStructForConditionalGeneration(__snake_case ) lowerCamelCase_ =rename_and_convert_flax_params(__snake_case ) model.load_state_dict(__snake_case ) lowerCamelCase_ =AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' ) lowerCamelCase_ =PixaStructImageProcessor() lowerCamelCase_ =PixaStructProcessor(image_processor=__snake_case , 
tokenizer=__snake_case ) if use_large: lowerCamelCase_ =4096 lowerCamelCase_ =True # mkdir if needed os.makedirs(__snake_case , exist_ok=__snake_case ) model.save_pretrained(__snake_case ) processor.save_pretrained(__snake_case ) print('''Model saved in {}'''.format(__snake_case ) ) if __name__ == "__main__": a_ : Optional[int] = argparse.ArgumentParser() parser.add_argument("""--t5x_checkpoint_path""", default=None, type=str, help="""Path to the original T5x checkpoint.""") parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--use_large""", action="""store_true""", help="""Use large model.""") parser.add_argument("""--is_vqa""", action="""store_true""", help="""Use large model.""") a_ : Tuple = parser.parse_args() convert_pixastruct_original_pytorch_checkpoint_to_hf( args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large )
75
0
import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> str: UpperCamelCase : str = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value") UpperCamelCase : str = ( ("layer.", "layer_"), ("word_embeddings.weight", "word_embeddings"), ("position_embeddings.weight", "position_embeddings"), ("token_type_embeddings.weight", "token_type_embeddings"), (".", "/"), ("LayerNorm/weight", "LayerNorm/gamma"), ("LayerNorm/bias", "LayerNorm/beta"), ("weight", "kernel"), ) if not os.path.isdir(__snake_case ): os.makedirs(__snake_case ) UpperCamelCase : Optional[int] = model.state_dict() def to_tf_var_name(_lowerCAmelCase ): for patt, repl in iter(__snake_case ): UpperCamelCase : List[str] = name.replace(__snake_case , __snake_case ) return F"""bert/{name}""" def create_tf_var(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): UpperCamelCase : Union[str, Any] = tf.dtypes.as_dtype(tensor.dtype ) UpperCamelCase : str = tf.get_variable(dtype=__snake_case , shape=tensor.shape , name=__snake_case , initializer=tf.zeros_initializer() ) session.run(tf.variables_initializer([tf_var] ) ) session.run(__snake_case ) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: UpperCamelCase : Any = to_tf_var_name(__snake_case ) UpperCamelCase : int = state_dict[var_name].numpy() if any(x in var_name for x in tensors_to_transpose ): UpperCamelCase : int = torch_tensor.T UpperCamelCase : Union[str, Any] = create_tf_var(tensor=__snake_case , name=__snake_case , session=__snake_case ) tf.keras.backend.set_value(__snake_case , __snake_case ) UpperCamelCase : List[Any] = session.run(__snake_case ) print(F"""Successfully created {tf_name}: {np.allclose(__snake_case , __snake_case )}""" ) UpperCamelCase : Tuple = tf.train.Saver(tf.trainable_variables() ) saver.save(__snake_case , os.path.join(__snake_case 
, model_name.replace("-" , "_" ) + ".ckpt" ) ) def A_ ( _lowerCAmelCase=None ) -> Any: UpperCamelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("--model_name" , type=__snake_case , required=__snake_case , help="model name e.g. bert-base-uncased" ) parser.add_argument( "--cache_dir" , type=__snake_case , default=__snake_case , required=__snake_case , help="Directory containing pytorch model" ) parser.add_argument("--pytorch_model_path" , type=__snake_case , required=__snake_case , help="/path/to/<pytorch-model-name>.bin" ) parser.add_argument("--tf_cache_dir" , type=__snake_case , required=__snake_case , help="Directory in which to save tensorflow model" ) UpperCamelCase : Union[str, Any] = parser.parse_args(__snake_case ) UpperCamelCase : Tuple = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , ) convert_pytorch_checkpoint_to_tf(model=__snake_case , ckpt_dir=args.tf_cache_dir , model_name=args.model_name ) if __name__ == "__main__": main()
52
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging a_ : Union[str, Any] = logging.get_logger(__name__) if is_vision_available(): import PIL class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Tuple =['pixel_values'] def __init__( self, lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = PILImageResampling.BICUBIC, lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = True, lowerCAmelCase = 1 / 255, lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = True, **lowerCAmelCase, ): """simple docstring""" super().__init__(**lowerCAmelCase ) lowerCamelCase_ =size if size is not None else {'''shortest_edge''': 224} lowerCamelCase_ =get_size_dict(lowerCAmelCase, default_to_square=lowerCAmelCase ) lowerCamelCase_ =crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} lowerCamelCase_ =get_size_dict(lowerCAmelCase, default_to_square=lowerCAmelCase, param_name='''crop_size''' ) lowerCamelCase_ =do_resize lowerCamelCase_ =size lowerCamelCase_ =resample lowerCamelCase_ =do_center_crop lowerCamelCase_ =crop_size lowerCamelCase_ =do_rescale lowerCamelCase_ =rescale_factor lowerCamelCase_ =do_normalize lowerCamelCase_ =image_mean if image_mean is not None else OPENAI_CLIP_MEAN lowerCamelCase_ =image_std if image_std is not None else OPENAI_CLIP_STD lowerCamelCase_ =do_convert_rgb def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = PILImageResampling.BICUBIC, lowerCAmelCase = None, 
**lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =get_size_dict(lowerCAmelCase, default_to_square=lowerCAmelCase ) if "shortest_edge" not in size: raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) lowerCamelCase_ =get_resize_output_image_size(lowerCAmelCase, size=size['''shortest_edge'''], default_to_square=lowerCAmelCase ) return resize(lowerCAmelCase, size=lowerCAmelCase, resample=lowerCAmelCase, data_format=lowerCAmelCase, **lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, **lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =get_size_dict(lowerCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' ) return center_crop(lowerCAmelCase, size=(size['''height'''], size['''width''']), data_format=lowerCAmelCase, **lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, **lowerCAmelCase, ): """simple docstring""" return rescale(lowerCAmelCase, scale=lowerCAmelCase, data_format=lowerCAmelCase, **lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, **lowerCAmelCase, ): """simple docstring""" return normalize(lowerCAmelCase, mean=lowerCAmelCase, std=lowerCAmelCase, data_format=lowerCAmelCase, **lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = ChannelDimension.FIRST, **lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =do_resize if do_resize is not None else self.do_resize lowerCamelCase_ =size if size is not None else self.size lowerCamelCase_ 
=get_size_dict(lowerCAmelCase, param_name='''size''', default_to_square=lowerCAmelCase ) lowerCamelCase_ =resample if resample is not None else self.resample lowerCamelCase_ =do_center_crop if do_center_crop is not None else self.do_center_crop lowerCamelCase_ =crop_size if crop_size is not None else self.crop_size lowerCamelCase_ =get_size_dict(lowerCAmelCase, param_name='''crop_size''', default_to_square=lowerCAmelCase ) lowerCamelCase_ =do_rescale if do_rescale is not None else self.do_rescale lowerCamelCase_ =rescale_factor if rescale_factor is not None else self.rescale_factor lowerCamelCase_ =do_normalize if do_normalize is not None else self.do_normalize lowerCamelCase_ =image_mean if image_mean is not None else self.image_mean lowerCamelCase_ =image_std if image_std is not None else self.image_std lowerCamelCase_ =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb lowerCamelCase_ =make_list_of_images(lowerCAmelCase ) if not valid_images(lowerCAmelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: lowerCamelCase_ =[convert_to_rgb(lowerCAmelCase ) for image in images] # All transformations expect numpy arrays. 
lowerCamelCase_ =[to_numpy_array(lowerCAmelCase ) for image in images] if do_resize: lowerCamelCase_ =[self.resize(image=lowerCAmelCase, size=lowerCAmelCase, resample=lowerCAmelCase ) for image in images] if do_center_crop: lowerCamelCase_ =[self.center_crop(image=lowerCAmelCase, size=lowerCAmelCase ) for image in images] if do_rescale: lowerCamelCase_ =[self.rescale(image=lowerCAmelCase, scale=lowerCAmelCase ) for image in images] if do_normalize: lowerCamelCase_ =[self.normalize(image=lowerCAmelCase, mean=lowerCAmelCase, std=lowerCAmelCase ) for image in images] lowerCamelCase_ =[to_channel_dimension_format(lowerCAmelCase, lowerCAmelCase ) for image in images] lowerCamelCase_ ={'''pixel_values''': images} return BatchFeature(data=lowerCAmelCase, tensor_type=lowerCAmelCase )
75
0
def euclidean_distance_sqr(point1, point2):
    """Return the squared Euclidean distance between two 2-D points.

    The square root is deliberately skipped: squared distances order the
    same way as true distances, so comparisons stay correct and cheaper.
    """
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    """Return *array* sorted by the given coordinate (0 = x, 1 = y)."""
    return sorted(array, key=lambda point: point[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute-force O(n^2) squared distance of the closest pair among the
    first *points_counts* entries of *points*.

    Used as the base case of the divide-and-conquer recursion.
    """
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """Closest-pair squared distance inside the middle strip.

    Strip points are sorted by y, so each point only has to be checked
    against at most its 6 predecessors (classic closest-pair argument).
    """
    for i in range(min(6, points_counts - 1), points_counts):
        # only look back at the (at most) 6 previous strip points
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    """Divide-and-conquer closest pair; returns the SQUARED distance."""
    # base case: brute-force small instances
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion: split at the median x and solve both halves
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    # Candidates straddling the dividing line. The strip half-width must be
    # the TRUE (non-squared) best distance, hence the sqrt of the squared
    # minimum kept so far.
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis ** 0.5:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    """Return the (true, non-squared) distance between the two closest points."""
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
207
"""Break a Caesar cipher with a chi-squared test against English letter frequencies."""
from __future__ import annotations


def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """Try every shift of a Caesar cipher and pick the most English-looking one.

    For each possible shift the ciphertext is decrypted and scored with a
    chi-squared statistic against the given letter frequencies; the shift
    with the smallest statistic wins.

    Args:
        ciphertext: the encrypted message.
        cipher_alphabet: alphabet used by the cipher (defaults to a-z).
        frequencies_dict: letter -> relative frequency; defaults to English.
        case_sensitive: if True, the case of each letter is preserved in the
            decrypted output (scoring is still done on lowercase letters).

    Returns:
        (best_shift, its chi-squared statistic, decrypted message).
    """
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values, keyed by shift: (statistic, decryption)
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear
                    # based on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear
                    # based on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi
    # squared statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
75
0
from datetime import datetime as dt
import os

from github import Github

# Issues carrying any of these labels are never auto-closed or auto-staled.
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main():
    """Close inactive issues and leave a stale warning on quiet ones.

    Runs against huggingface/transformers authenticated via the GITHUB_TOKEN
    environment variable. An open issue whose newest comment is by the bot
    and that has been quiet for more than 7 days is closed; one quiet for
    more than 23 days gets the stale comment. Both paths also require the
    issue to be at least 30 days old and to carry no exempt label.
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # newest comment first
        comments = sorted(
            [comment for comment in issue.get_comments()],
            key=lambda c: c.created_at,
            reverse=True,
        )
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
275
"""Utilities to download, cache and import pipeline modules that live outside
the ``diffusers`` package itself (local files, Hub repos, or the community
examples folder on GitHub)."""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request

from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version

from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging


# Template URL for fetching a community pipeline straight from the GitHub repo.
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def get_diffusers_versions():
    """Return all diffusers versions published on PyPI, sorted oldest first."""
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda release: version.Version(release))


def init_hf_modules():
    """Create the dynamic-module cache directory and put it on ``sys.path``.

    This function has already been executed if HF_MODULES_CACHE already is in
    the Python path.
    """
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name: Union[str, os.PathLike]):
    """Create the package *name* inside the dynamic-module cache (with parents)."""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def get_relative_imports(module_file):
    """Return the names relatively imported by *module_file* (``.xxx`` imports)."""
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))


def get_relative_import_files(module_file):
    """Return every file transitively reachable from *module_file* through
    relative imports (the starting file itself excluded)."""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports


def check_imports(filename):
    """Check that all top-level (non-relative) imports of *filename* are
    importable in the current environment.

    Raises:
        ImportError: listing every missing package.

    Returns:
        The list of relative imports of the file (see ``get_relative_imports``).
    """
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)


def get_class_in_module(class_name, module_path):
    """Import *module_path* (already placed in the dynamic-module cache) and
    return the attribute *class_name* from it; when *class_name* is None the
    unique DiffusionPipeline subclass is auto-detected instead."""
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    """Return the unique ``DiffusionPipeline`` subclass defined in
    *loaded_module*, ignoring classes re-exported from diffusers itself.

    Raises:
        ValueError: when more than one candidate class is found.
    """
    # local import to avoid a circular dependency at module-import time
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class


def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    """Resolve *module_file* (local path, GitHub community pipeline, or Hub
    repo file), copy it and its relative imports into the dynamic-module
    cache, and return its path relative to that cache."""
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        # a bare name (no "/") is a community pipeline fetched from GitHub
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                # public raw.githubusercontent.com URL: no Hub token needed
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative imports
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)


def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Download (and cache) a module file, import it from the dynamic-module
    cache, and return *class_name* from it — or the auto-detected
    DiffusionPipeline subclass when *class_name* is None."""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
75
0
"""Lazy import structure for the Data2Vec (audio/text/vision) models."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Maps submodule name -> public names it exports. Consumed by _LazyModule so
# that torch / tensorflow are only imported when one of these names is used.
_import_structure = {
    "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
    "configuration_data2vec_text": [
        "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecTextConfig",
        "Data2VecTextOnnxConfig",
    ],
    "configuration_data2vec_vision": [
        "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecVisionConfig",
        "Data2VecVisionOnnxConfig",
    ],
}

# PyTorch modeling files are only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_data2vec_audio"] = [
        "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecAudioForAudioFrameClassification",
        "Data2VecAudioForCTC",
        "Data2VecAudioForSequenceClassification",
        "Data2VecAudioForXVector",
        "Data2VecAudioModel",
        "Data2VecAudioPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_text"] = [
        "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecTextForCausalLM",
        "Data2VecTextForMaskedLM",
        "Data2VecTextForMultipleChoice",
        "Data2VecTextForQuestionAnswering",
        "Data2VecTextForSequenceClassification",
        "Data2VecTextForTokenClassification",
        "Data2VecTextModel",
        "Data2VecTextPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_vision"] = [
        "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecVisionForImageClassification",
        "Data2VecVisionForMaskedImageModeling",
        "Data2VecVisionForSemanticSegmentation",
        "Data2VecVisionModel",
        "Data2VecVisionPreTrainedModel",
    ]

# TensorFlow modeling files are only exposed when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_data2vec_vision"] = [
        "TFData2VecVisionForImageClassification",
        "TFData2VecVisionForSemanticSegmentation",
        "TFData2VecVisionModel",
        "TFData2VecVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only; mirrors _import_structure.
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
340
'''simple docstring''' a_ : Any = [ 9_99, 8_00, 7_99, 6_00, 5_99, 5_00, 4_00, 3_99, 3_77, 3_55, 3_33, 3_11, 2_88, 2_66, 2_44, 2_22, 2_00, 1_99, 1_77, 1_55, 1_33, 1_11, 88, 66, 44, 22, 0, ] a_ : Any = [ 9_99, 9_76, 9_52, 9_28, 9_05, 8_82, 8_58, 8_57, 8_10, 7_62, 7_15, 7_14, 5_72, 4_29, 4_28, 2_86, 2_85, 2_38, 1_90, 1_43, 1_42, 1_18, 95, 71, 47, 24, 0, ] a_ : Optional[Any] = [ 9_99, 9_88, 9_77, 9_66, 9_55, 9_44, 9_33, 9_22, 9_11, 9_00, 8_99, 8_79, 8_59, 8_40, 8_20, 8_00, 7_99, 7_66, 7_33, 7_00, 6_99, 6_50, 6_00, 5_99, 5_00, 4_99, 4_00, 3_99, 3_50, 3_00, 2_99, 2_66, 2_33, 2_00, 1_99, 1_79, 1_59, 1_40, 1_20, 1_00, 99, 88, 77, 66, 55, 44, 33, 22, 11, 0, ] a_ : str = [ 9_99, 9_95, 9_92, 9_89, 9_85, 9_81, 9_78, 9_75, 9_71, 9_67, 9_64, 9_61, 9_57, 9_56, 9_51, 9_47, 9_42, 9_37, 9_33, 9_28, 9_23, 9_19, 9_14, 9_13, 9_08, 9_03, 8_97, 8_92, 8_87, 8_81, 8_76, 8_71, 8_70, 8_64, 8_58, 8_52, 8_46, 8_40, 8_34, 8_28, 8_27, 8_20, 8_13, 8_06, 7_99, 7_92, 7_85, 7_84, 7_77, 7_70, 7_63, 7_56, 7_49, 7_42, 7_41, 7_33, 7_24, 7_16, 7_07, 6_99, 6_98, 6_88, 6_77, 6_66, 6_56, 6_55, 6_45, 6_34, 6_23, 6_13, 6_12, 5_98, 5_84, 5_70, 5_69, 5_55, 5_41, 5_27, 5_26, 5_05, 4_84, 4_83, 4_62, 4_40, 4_39, 3_96, 3_95, 3_52, 3_51, 3_08, 3_07, 2_64, 2_63, 2_20, 2_19, 1_76, 1_32, 88, 44, 0, ] a_ : Optional[int] = [ 9_99, 9_97, 9_95, 9_92, 9_90, 9_88, 9_86, 9_84, 9_81, 9_79, 9_77, 9_75, 9_72, 9_70, 9_68, 9_66, 9_64, 9_61, 9_59, 9_57, 9_56, 9_54, 9_51, 9_49, 9_46, 9_44, 9_41, 9_39, 9_36, 9_34, 9_31, 9_29, 9_26, 9_24, 9_21, 9_19, 9_16, 9_14, 9_13, 9_10, 9_07, 9_05, 9_02, 8_99, 8_96, 8_93, 8_91, 8_88, 8_85, 8_82, 8_79, 8_77, 8_74, 8_71, 8_70, 8_67, 8_64, 8_61, 8_58, 8_55, 8_52, 8_49, 8_46, 8_43, 8_40, 8_37, 8_34, 8_31, 8_28, 8_27, 8_24, 8_21, 8_17, 8_14, 8_11, 8_08, 8_04, 8_01, 7_98, 7_95, 7_91, 7_88, 7_85, 7_84, 7_80, 7_77, 7_74, 7_70, 7_66, 7_63, 7_60, 7_56, 7_52, 7_49, 7_46, 7_42, 7_41, 7_37, 7_33, 7_30, 7_26, 7_22, 7_18, 7_14, 7_10, 7_07, 7_03, 6_99, 6_98, 6_94, 6_90, 6_85, 6_81, 6_77, 6_73, 6_69, 6_64, 6_60, 
6_56, 6_55, 6_50, 6_46, 6_41, 6_36, 6_32, 6_27, 6_22, 6_18, 6_13, 6_12, 6_07, 6_02, 5_96, 5_91, 5_86, 5_80, 5_75, 5_70, 5_69, 5_63, 5_57, 5_51, 5_45, 5_39, 5_33, 5_27, 5_26, 5_19, 5_12, 5_05, 4_98, 4_91, 4_84, 4_83, 4_74, 4_66, 4_57, 4_49, 4_40, 4_39, 4_28, 4_18, 4_07, 3_96, 3_95, 3_81, 3_66, 3_52, 3_51, 3_30, 3_08, 3_07, 2_86, 2_64, 2_63, 2_42, 2_20, 2_19, 1_76, 1_75, 1_32, 1_31, 88, 44, 0, ] a_ : Dict = [ 9_99, 9_91, 9_82, 9_74, 9_66, 9_58, 9_50, 9_41, 9_33, 9_25, 9_16, 9_08, 9_00, 8_99, 8_74, 8_50, 8_25, 8_00, 7_99, 7_00, 6_00, 5_00, 4_00, 3_00, 2_00, 1_00, 0, ] a_ : Tuple = [ 9_99, 9_92, 9_85, 9_78, 9_71, 9_64, 9_57, 9_49, 9_42, 9_35, 9_28, 9_21, 9_14, 9_07, 9_00, 8_99, 8_79, 8_59, 8_40, 8_20, 8_00, 7_99, 7_66, 7_33, 7_00, 6_99, 6_50, 6_00, 5_99, 5_00, 4_99, 4_00, 3_99, 3_00, 2_99, 2_00, 1_99, 1_00, 99, 0, ] a_ : Any = [ 9_99, 9_96, 9_92, 9_89, 9_85, 9_82, 9_79, 9_75, 9_72, 9_68, 9_65, 9_61, 9_58, 9_55, 9_51, 9_48, 9_44, 9_41, 9_38, 9_34, 9_31, 9_27, 9_24, 9_20, 9_17, 9_14, 9_10, 9_07, 9_03, 9_00, 8_99, 8_91, 8_84, 8_76, 8_69, 8_61, 8_53, 8_46, 8_38, 8_30, 8_23, 8_15, 8_08, 8_00, 7_99, 7_88, 7_77, 7_66, 7_55, 7_44, 7_33, 7_22, 7_11, 7_00, 6_99, 6_88, 6_77, 6_66, 6_55, 6_44, 6_33, 6_22, 6_11, 6_00, 5_99, 5_85, 5_71, 5_57, 5_42, 5_28, 5_14, 5_00, 4_99, 4_85, 4_71, 4_57, 4_42, 4_28, 4_14, 4_00, 3_99, 3_79, 3_59, 3_40, 3_20, 3_00, 2_99, 2_79, 2_59, 2_40, 2_20, 2_00, 1_99, 1_66, 1_33, 1_00, 99, 66, 33, 0, ]
75
0
'''simple docstring''' import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel A__ : Dict =HfApi() A__ : Tuple ={} # fmt: off A__ : Union[str, Any] =torch.tensor([ -0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467, 1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189, -1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839, 0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557 ]) A__ : List[str] =torch.tensor([ -2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436, 1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208, -2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948, 2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365 ]) A__ : Tuple =torch.tensor([ -0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869, -0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304, -0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925, 0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943 ]) A__ : Union[str, Any] =torch.tensor([ 0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172, -0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309, 0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805, -0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505 ]) A__ : Union[str, Any] =torch.tensor([ 0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133, -0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395, 0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559, -0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386 ]) A__ : Union[str, Any] =torch.tensor([ 0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078, -0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330, 0.1_391, 0.1_709, -0.0_562, 
-0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683, -0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431 ]) A__ : Optional[int] =torch.tensor([ 0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042, -0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398, 0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574, -0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390 ]) A__ : Tuple =torch.tensor([ 0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042, -0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290, 0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746, -0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473 ]) A__ : List[Any] =torch.tensor([ -1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330, 1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243, -2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810, 1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251]) A__ : Tuple =torch.tensor([ -1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324, 0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181, -2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259, 1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266 ]) A__ : List[str] =torch.tensor([ -1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212, 0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027, -2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131, 1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355 ]) A__ : int =torch.tensor([ -2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959, 1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351, -3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341, 3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 
3.2_066 ]) A__ : Union[str, Any] =torch.tensor([ -2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740, 1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398, -2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395, 2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243 ]) A__ : Dict =torch.tensor([ -2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336, 1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908, -3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560, 3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343 ]) A__ : Optional[int] =torch.tensor([ -1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344, 1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391, -2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439, 1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219 ]) # fmt: on A__ : Union[str, Any] =api.list_models(filter='''diffusers''') for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": A__ : Optional[Any] ="""/home/patrick/google_checkpoints/""" + mod.modelId.split('''/''')[-1] print(F"""Started running {mod.modelId}!!!""") if mod.modelId.startswith('''CompVis'''): A__ : str =UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''') else: A__ : int =UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) A__ : Optional[Any] =torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) A__ : str =torch.tensor([10] * noise.shape[0]) with torch.no_grad(): A__ : List[Any] =model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1e-3 ) print(F"""{mod.modelId} has passed successfully!!!""")
70
"""Lazy import structure for the Funnel Transformer model."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps submodule name -> public names it exports. Consumed by _LazyModule so
# that torch / tensorflow / tokenizers are only imported on first access.
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}

# Fast tokenizer requires the `tokenizers` library.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]

# PyTorch modeling files are only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]

# TensorFlow modeling files are only exposed when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only; mirrors _import_structure.
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
75
0
"""simple docstring""" import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __magic_name__ ( unittest.TestCase ): '''simple docstring''' @property def _lowerCAmelCase ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCamelCase = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , ) return model def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.dummy_uncond_unet lowerCamelCase = ScoreSdeVeScheduler() lowerCamelCase = ScoreSdeVePipeline(unet=_a , scheduler=_a ) sde_ve.to(_a ) sde_ve.set_progress_bar_config(disable=_a ) lowerCamelCase = torch.manual_seed(0 ) lowerCamelCase = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=_a ).images lowerCamelCase = torch.manual_seed(0 ) lowerCamelCase = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=_a , return_dict=_a )[ 0 ] lowerCamelCase = image[0, -3:, -3:, -1] lowerCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCamelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class __magic_name__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = """google/ncsnpp-church-256""" lowerCamelCase = UNetaDModel.from_pretrained(_a ) lowerCamelCase = ScoreSdeVeScheduler.from_pretrained(_a ) lowerCamelCase = ScoreSdeVePipeline(unet=_a , scheduler=_a ) sde_ve.to(_a ) sde_ve.set_progress_bar_config(disable=_a ) 
lowerCamelCase = torch.manual_seed(0 ) lowerCamelCase = sde_ve(num_inference_steps=10 , output_type="""numpy""" , generator=_a ).images lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) lowerCamelCase = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
291
'''simple docstring''' import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class __UpperCamelCase ( unittest.TestCase ): def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split() lowerCamelCase_ =dict(zip(lowerCAmelCase, range(len(lowerCAmelCase ) ) ) ) lowerCamelCase_ ={ '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>''', } lowerCamelCase_ ={ '''feature_size''': 1, '''padding_value''': 0.0, '''sampling_rate''': 16_000, '''return_attention_mask''': False, '''do_normalize''': True, } lowerCamelCase_ =tempfile.mkdtemp() lowerCamelCase_ =os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCamelCase_ =os.path.join(self.tmpdirname, lowerCAmelCase ) with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as fp: fp.write(json.dumps(lowerCAmelCase ) + '''\n''' ) with open(self.feature_extraction_file, '''w''', 
encoding='''utf-8''' ) as fp: fp.write(json.dumps(lowerCAmelCase ) + '''\n''' ) # load decoder from hub lowerCamelCase_ ='''hf-internal-testing/ngram-beam-search-decoder''' def lowercase__ ( self, **lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.add_kwargs_tokens_map.copy() kwargs.update(lowerCAmelCase ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase ) def lowercase__ ( self, **lowerCAmelCase ): """simple docstring""" return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **lowerCAmelCase ) def lowercase__ ( self, **lowerCAmelCase ): """simple docstring""" return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer, lowerCAmelCase ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor, lowerCAmelCase ) # decoder self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set, decoder.model_container[decoder._model_key]._unigram_set, ) self.assertIsInstance(processor.decoder, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), 
decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha, 5.0 ) self.assertEqual(processor.language_model.beta, 3.0 ) self.assertEqual(processor.language_model.score_boundary, -7.0 ) self.assertEqual(processor.language_model.unk_score_offset, 3 ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(['''xx'''] ) with self.assertRaisesRegex(lowerCAmelCase, '''include''' ): WavaVecaProcessorWithLM( tokenizer=lowerCAmelCase, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder() ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) lowerCamelCase_ =floats_list((3, 1_000) ) lowerCamelCase_ =feature_extractor(lowerCAmelCase, return_tensors='''np''' ) lowerCamelCase_ =processor(lowerCAmelCase, return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) lowerCamelCase_ ='''This is a test string''' lowerCamelCase_ =processor(text=lowerCAmelCase ) lowerCamelCase_ =tokenizer(lowerCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], 
encoded_processor[key] ) def lowercase__ ( self, lowerCAmelCase=(2, 10, 16), lowerCAmelCase=77 ): """simple docstring""" np.random.seed(lowerCAmelCase ) return np.random.rand(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) lowerCamelCase_ =self._get_dummy_logits(shape=(10, 16), seed=13 ) lowerCamelCase_ =processor.decode(lowerCAmelCase ) lowerCamelCase_ =decoder.decode_beams(lowerCAmelCase )[0] self.assertEqual(decoded_decoder[0], decoded_processor.text ) self.assertEqual('''</s> <s> </s>''', decoded_processor.text ) self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score ) @parameterized.expand([[None], ['''fork'''], ['''spawn''']] ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) lowerCamelCase_ =self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) 
if pool_context is None: lowerCamelCase_ =processor.batch_decode(lowerCAmelCase ) else: with get_context(lowerCAmelCase ).Pool() as pool: lowerCamelCase_ =processor.batch_decode(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =list(lowerCAmelCase ) with get_context('''fork''' ).Pool() as p: lowerCamelCase_ =decoder.decode_beams_batch(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =[], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(lowerCAmelCase, decoded_processor.text ) self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''], decoded_processor.text ) self.assertListEqual(lowerCAmelCase, decoded_processor.logit_score ) self.assertListEqual(lowerCAmelCase, decoded_processor.lm_score ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) lowerCamelCase_ =self._get_dummy_logits() lowerCamelCase_ =15 lowerCamelCase_ =-2_0.0 lowerCamelCase_ =-4.0 lowerCamelCase_ =processor.batch_decode( lowerCAmelCase, beam_width=lowerCAmelCase, beam_prune_logp=lowerCAmelCase, token_min_logp=lowerCAmelCase, ) lowerCamelCase_ =decoded_processor_out.text lowerCamelCase_ =list(lowerCAmelCase ) with get_context('''fork''' ).Pool() as pool: lowerCamelCase_ =decoder.decode_beams_batch( lowerCAmelCase, lowerCAmelCase, beam_width=lowerCAmelCase, beam_prune_logp=lowerCAmelCase, token_min_logp=lowerCAmelCase, ) lowerCamelCase_ =[d[0][0] for d in decoded_decoder_out] lowerCamelCase_ =[d[0][2] for d in decoded_decoder_out] lowerCamelCase_ =[d[0][3] for d in decoded_decoder_out] self.assertListEqual(lowerCAmelCase, lowerCAmelCase ) self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''], 
lowerCAmelCase ) self.assertTrue(np.array_equal(lowerCAmelCase, decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7], lowerCAmelCase, atol=1e-3 ) ) self.assertTrue(np.array_equal(lowerCAmelCase, decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4], lowerCAmelCase, atol=1e-3 ) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) lowerCamelCase_ =self._get_dummy_logits() lowerCamelCase_ =2.0 lowerCamelCase_ =5.0 lowerCamelCase_ =-2_0.0 lowerCamelCase_ =True lowerCamelCase_ =processor.batch_decode( lowerCAmelCase, alpha=lowerCAmelCase, beta=lowerCAmelCase, unk_score_offset=lowerCAmelCase, lm_score_boundary=lowerCAmelCase, ) lowerCamelCase_ =decoded_processor_out.text lowerCamelCase_ =list(lowerCAmelCase ) decoder.reset_params( alpha=lowerCAmelCase, beta=lowerCAmelCase, unk_score_offset=lowerCAmelCase, lm_score_boundary=lowerCAmelCase, ) with get_context('''fork''' ).Pool() as pool: lowerCamelCase_ =decoder.decode_beams_batch( lowerCAmelCase, lowerCAmelCase, ) lowerCamelCase_ =[d[0][0] for d in decoded_decoder_out] self.assertListEqual(lowerCAmelCase, lowerCAmelCase ) self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''], lowerCAmelCase ) lowerCamelCase_ =processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha, 2.0 ) self.assertEqual(lm_model.beta, 5.0 ) self.assertEqual(lm_model.unk_score_offset, -2_0.0 ) self.assertEqual(lm_model.score_boundary, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) lowerCamelCase_ 
=processor.decoder.model_container[processor.decoder._model_key] lowerCamelCase_ =Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() lowerCamelCase_ =os.listdir(lowerCAmelCase ) lowerCamelCase_ =['''alphabet.json''', '''language_model'''] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) self.assertListEqual(lowerCAmelCase, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =snapshot_download('''hf-internal-testing/processor_with_lm''' ) lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained(lowerCAmelCase ) lowerCamelCase_ =processor.decoder.model_container[processor.decoder._model_key] lowerCamelCase_ =Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() lowerCamelCase_ =os.listdir(lowerCAmelCase ) lowerCamelCase_ =os.listdir(lowerCAmelCase ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(lowerCAmelCase, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) lowerCamelCase_ =AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' ) lowerCamelCase_ =floats_list((3, 1_000) ) lowerCamelCase_ =processor_wavaveca(lowerCAmelCase, return_tensors='''np''' ) lowerCamelCase_ =processor_auto(lowerCAmelCase, return_tensors='''np''' ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum(), input_auto[key].sum(), delta=1e-2 ) lowerCamelCase_ =self._get_dummy_logits() lowerCamelCase_ =processor_wavaveca.batch_decode(lowerCAmelCase ) lowerCamelCase_ =processor_auto.batch_decode(lowerCAmelCase ) 
self.assertListEqual(decoded_wavaveca.text, decoded_auto.text ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) self.assertListEqual( processor.model_input_names, feature_extractor.model_input_names, msg='''`processor` and `feature_extractor` model input names do not match''', ) @staticmethod def lowercase__ ( lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =[d[key] for d in offsets] return retrieved_list def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) lowerCamelCase_ =self._get_dummy_logits()[0] lowerCamelCase_ =processor.decode(lowerCAmelCase, output_word_offsets=lowerCAmelCase ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ), 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(lowerCAmelCase, lowerCAmelCase ) ) self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''], '''word''' ) ), outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''word''' ), ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''start_offset''' ), [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''end_offset''' ), [1, 3, 5] ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) lowerCamelCase_ =self._get_dummy_logits() lowerCamelCase_ =processor.batch_decode(lowerCAmelCase, output_word_offsets=lowerCAmelCase ) # check Wav2Vec2CTCTokenizerOutput keys for word 
self.assertEqual(len(outputs.keys() ), 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(lowerCAmelCase, lowerCAmelCase ) ) self.assertListEqual( [''' '''.join(self.get_from_offsets(lowerCAmelCase, '''word''' ) ) for o in outputs['''word_offsets''']], outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''word''' ), ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''start_offset''' ), [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''end_offset''' ), [1, 3, 5] ) @slow @require_torch @require_torchaudio def lowercase__ ( self ): """simple docstring""" import torch lowerCamelCase_ =load_dataset('''common_voice''', '''en''', split='''train''', streaming=lowerCAmelCase ) lowerCamelCase_ =ds.cast_column('''audio''', datasets.Audio(sampling_rate=16_000 ) ) lowerCamelCase_ =iter(lowerCAmelCase ) lowerCamelCase_ =next(lowerCAmelCase ) lowerCamelCase_ =AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) lowerCamelCase_ =WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train lowerCamelCase_ =processor(sample['''audio''']['''array'''], return_tensors='''pt''' ).input_values with torch.no_grad(): lowerCamelCase_ =model(lowerCAmelCase ).logits.cpu().numpy() lowerCamelCase_ =processor.decode(logits[0], output_word_offsets=lowerCAmelCase ) lowerCamelCase_ =model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate lowerCamelCase_ =[ { '''start_time''': d['''start_offset'''] * time_offset, '''end_time''': d['''end_offset'''] * time_offset, '''word''': d['''word'''], } for d in output['''word_offsets'''] ] lowerCamelCase_ ='''WHY DOES MILISANDRA LOOK LIKE SHE WANTS 
TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL''' # output words self.assertEqual(''' '''.join(self.get_from_offsets(lowerCAmelCase, '''word''' ) ), lowerCAmelCase ) self.assertEqual(''' '''.join(self.get_from_offsets(lowerCAmelCase, '''word''' ) ), output.text ) # output times lowerCamelCase_ =torch.tensor(self.get_from_offsets(lowerCAmelCase, '''start_time''' ) ) lowerCamelCase_ =torch.tensor(self.get_from_offsets(lowerCAmelCase, '''end_time''' ) ) # fmt: off lowerCamelCase_ =torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] ) lowerCamelCase_ =torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] ) # fmt: on self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=0.0_1 ) ) self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=0.0_1 ) )
75
0
"""simple docstring""" from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, is_accelerate_available, logging, randn_tensor, replace_example_docstring, ) from .renderer import ShapERenderer SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) # pylint: disable=invalid-name SCREAMING_SNAKE_CASE__ = """ Examples: ```py >>> from PIL import Image >>> import torch >>> from diffusers import DiffusionPipeline >>> from diffusers.utils import export_to_gif, load_image >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") >>> repo = \"openai/shap-e-img2img\" >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16) >>> pipe = pipe.to(device) >>> guidance_scale = 3.0 >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\" >>> image = load_image(image_url).convert(\"RGB\") >>> images = pipe( ... image, ... guidance_scale=guidance_scale, ... num_inference_steps=64, ... frame_size=256, ... 
).images >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\") ``` """ @dataclass class lowerCAmelCase_ ( lowerCamelCase__ ): """simple docstring""" _lowerCAmelCase : Union[PIL.Image.Image, np.ndarray] class lowerCAmelCase_ ( lowerCamelCase__ ): """simple docstring""" def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): """simple docstring""" super().__init__() self.register_modules( prior=lowerCAmelCase , image_encoder=lowerCAmelCase , image_processor=lowerCAmelCase , scheduler=lowerCAmelCase , renderer=lowerCAmelCase , ) def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" if latents is None: snake_case = randn_tensor(lowerCAmelCase , generator=lowerCAmelCase , device=lowerCAmelCase , dtype=lowerCAmelCase ) else: if latents.shape != shape: raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" ) snake_case = latents.to(lowerCAmelCase ) snake_case = latents * scheduler.init_noise_sigma return latents def snake_case ( self , lowerCAmelCase=0 ): """simple docstring""" if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('Please install accelerate via `pip install accelerate`' ) snake_case = torch.device(F"""cuda:{gpu_id}""" ) snake_case = [self.image_encoder, self.prior] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(lowerCAmelCase , lowerCAmelCase ) @property def snake_case ( self ): """simple docstring""" if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ): return self.device for module in self.image_encoder.modules(): if ( hasattr(lowerCAmelCase , '_hf_hook' ) and hasattr(module._hf_hook , 'execution_device' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device def snake_case ( self , 
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): """simple docstring""" if isinstance(lowerCAmelCase , lowerCAmelCase ) and isinstance(image[0] , torch.Tensor ): snake_case = torch.cat(lowerCAmelCase , axis=0 ) if image[0].ndim == 4 else torch.stack(lowerCAmelCase , axis=0 ) if not isinstance(lowerCAmelCase , torch.Tensor ): snake_case = self.image_processor(lowerCAmelCase , return_tensors='pt' ).pixel_values[0].unsqueeze(0 ) snake_case = image.to(dtype=self.image_encoder.dtype , device=lowerCAmelCase ) snake_case = self.image_encoder(lowerCAmelCase )['last_hidden_state'] snake_case = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 snake_case = image_embeds.repeat_interleave(lowerCAmelCase , dim=0 ) if do_classifier_free_guidance: snake_case = torch.zeros_like(lowerCAmelCase ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes snake_case = torch.cat([negative_image_embeds, image_embeds] ) return image_embeds @torch.no_grad() @replace_example_docstring(lowerCAmelCase ) def __call__( self , lowerCAmelCase , lowerCAmelCase = 1 , lowerCAmelCase = 25 , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = 4.0 , lowerCAmelCase = 64 , lowerCAmelCase = "pil" , lowerCAmelCase = True , ): """simple docstring""" if isinstance(lowerCAmelCase , PIL.Image.Image ): snake_case = 1 elif isinstance(lowerCAmelCase , torch.Tensor ): snake_case = image.shape[0] elif isinstance(lowerCAmelCase , lowerCAmelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ): snake_case = len(lowerCAmelCase ) else: raise ValueError( F"""`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(lowerCAmelCase )}""" ) snake_case = self._execution_device snake_case = batch_size * num_images_per_prompt snake_case = guidance_scale > 1.0 snake_case = 
self._encode_image(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) # prior self.scheduler.set_timesteps(lowerCAmelCase , device=lowerCAmelCase ) snake_case = self.scheduler.timesteps snake_case = self.prior.config.num_embeddings snake_case = self.prior.config.embedding_dim snake_case = self.prepare_latents( (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , self.scheduler , ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim snake_case = latents.reshape(latents.shape[0] , lowerCAmelCase , lowerCAmelCase ) for i, t in enumerate(self.progress_bar(lowerCAmelCase ) ): # expand the latents if we are doing classifier free guidance snake_case = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents snake_case = self.scheduler.scale_model_input(lowerCAmelCase , lowerCAmelCase ) snake_case = self.prior( lowerCAmelCase , timestep=lowerCAmelCase , proj_embedding=lowerCAmelCase , ).predicted_image_embedding # remove the variance snake_case ,snake_case = noise_pred.split( scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim if do_classifier_free_guidance is not None: snake_case ,snake_case = noise_pred.chunk(2 ) snake_case = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) snake_case = self.scheduler.step( lowerCAmelCase , timestep=lowerCAmelCase , sample=lowerCAmelCase , ).prev_sample if output_type == "latent": return ShapEPipelineOutput(images=lowerCAmelCase ) snake_case = [] for i, latent in enumerate(lowerCAmelCase ): print() snake_case = self.renderer.decode( latent[None, :] , lowerCAmelCase , size=lowerCAmelCase , ray_batch_size=40_96 , n_coarse_samples=64 , n_fine_samples=1_28 , ) images.append(lowerCAmelCase ) snake_case = torch.stack(lowerCAmelCase ) if output_type not in ["np", "pil"]: raise ValueError(F"""Only the output types 
`pil` and `np` are supported not output_type={output_type}""" ) snake_case = images.cpu().numpy() if output_type == "pil": snake_case = [self.numpy_to_pil(lowerCAmelCase ) for image in images] # Offload last model to CPU if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (images,) return ShapEPipelineOutput(images=lowerCAmelCase )
150
"""Tests for the Stable Diffusion InstructPix2Pix pipeline (fast CPU tests + slow GPU tests)."""
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionInstructPixaPixPipeline,
    UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionInstructPixaPixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPixaPixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build tiny, randomly initialized pipeline components so the fast tests run on CPU."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=8,  # InstructPix2Pix concatenates image latents to noise latents (4 + 4 channels)
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Return deterministic call kwargs (prompt, PIL image, generator) for the tiny pipeline."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uinta(image)).convert("RGB")
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        # convert the single PIL image into a batched (2, C, H, W) tensor in [-0, 1]
        image = np.array(inputs["image"]).astype(np.floataa) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in slice]))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_latents_input(self):
        """Passing pre-encoded VAE latents as the image input must match passing the image itself."""
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPixaPixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")

        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()

        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")


@slow
@require_torch_gpu
class StableDiffusionInstructPixaPixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # free GPU memory between tests
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        """Return deterministic call kwargs using the reference example image from the Hub."""
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])

        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])

        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])

        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_intermediate_state(self):
        """Check intermediate latents via the step callback."""
        number_of_steps = 0

        def callback_fn(step, timestep, latents) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.floataa
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.floataa
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))

        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
75
0
"""Tests for the Perceiver byte-level tokenizer."""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple

from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


# pick the tensor framework the batch-encoding tests will request
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        """Build a (text, ids) pair of cleanly round-tripping ASCII tokens for the common tests."""
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                # skip ids that do not decode to valid text (previously a stale `tok` was appended here)
                continue
            toks.append((i, tok))

        # keep only tokens made of spaces/letters that encode back to exactly one id
        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks

        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")

    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}"""):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}"""):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}"""):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [F"""<extra_id_{i}>""" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )
                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_invalid_byte_id(self):
        # an out-of-range byte id decodes to the Unicode replacement character
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")

    # tokenizer does not have a fixed vocabulary file, so the common vocab tests do not apply
    def test_pretrained_model_lists(self):
        pass

    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}"""):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
96
'''simple docstring''' from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class __UpperCamelCase : lowercase : Union[str, Any] =XGLMConfig lowercase : Optional[Any] ={} lowercase : Optional[int] ='gelu' def __init__( self, lowerCAmelCase, lowerCAmelCase=14, lowerCAmelCase=7, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=99, lowerCAmelCase=32, lowerCAmelCase=2, lowerCAmelCase=4, lowerCAmelCase=37, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=0.0_2, ): """simple docstring""" lowerCamelCase_ =parent lowerCamelCase_ =batch_size lowerCamelCase_ =seq_length lowerCamelCase_ =is_training lowerCamelCase_ =use_input_mask lowerCamelCase_ =use_labels lowerCamelCase_ =vocab_size lowerCamelCase_ =d_model lowerCamelCase_ =num_hidden_layers lowerCamelCase_ =num_attention_heads lowerCamelCase_ =ffn_dim lowerCamelCase_ =activation_function lowerCamelCase_ =activation_dropout lowerCamelCase_ =attention_dropout lowerCamelCase_ =max_position_embeddings lowerCamelCase_ =initializer_range lowerCamelCase_ =None lowerCamelCase_ =0 lowerCamelCase_ =2 lowerCamelCase_ =1 def lowercase__ ( self ): """simple docstring""" return XGLMConfig.from_pretrained('''facebook/xglm-564M''' ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length], self.vocab_size ), clip_value_min=0, clip_value_max=3 ) lowerCamelCase_ =None if 
self.use_input_mask: lowerCamelCase_ =random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ =self.get_config() lowerCamelCase_ =floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2 ) return ( config, input_ids, input_mask, head_mask, ) def lowercase__ ( self ): """simple docstring""" return XGLMConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=lowerCAmelCase, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=lowerCAmelCase, ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.prepare_config_and_inputs() ( ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ) =config_and_inputs lowerCamelCase_ ={ '''input_ids''': input_ids, '''head_mask''': head_mask, } return config, inputs_dict @require_tf class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase : int =(TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () lowercase : Optional[Any] =(TFXGLMForCausalLM,) if is_tf_available() else () lowercase : Tuple =( {'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {} ) lowercase : Optional[Any] =False lowercase : Optional[Any] =False lowercase : Optional[int] =False def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =TFXGLMModelTester(self ) lowerCamelCase_ =ConfigTester(self, config_class=lowerCAmelCase, n_embd=37 ) def lowercase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() @slow def lowercase__ ( self ): """simple docstring""" for model_name in 
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ =TFXGLMModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) @unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' ) def lowercase__ ( self ): """simple docstring""" super().test_resize_token_embeddings() @require_tf class __UpperCamelCase ( unittest.TestCase ): @slow def lowercase__ ( self, lowerCAmelCase=True ): """simple docstring""" lowerCamelCase_ =TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' ) lowerCamelCase_ =tf.convert_to_tensor([[2, 268, 9_865]], dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off lowerCamelCase_ =[2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581] # fmt: on lowerCamelCase_ =model.generate(lowerCAmelCase, do_sample=lowerCAmelCase, num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist(), lowerCAmelCase ) @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' ) lowerCamelCase_ =TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' ) tf.random.set_seed(0 ) lowerCamelCase_ =tokenizer('''Today is a nice day and''', return_tensors='''tf''' ) lowerCamelCase_ =tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(''':/CPU:0''' ): lowerCamelCase_ =model.generate(lowerCAmelCase, do_sample=lowerCAmelCase, seed=[7, 0] ) lowerCamelCase_ =tokenizer.decode(output_ids[0], skip_special_tokens=lowerCAmelCase ) lowerCamelCase_ =( '''Today is a nice day and warm evening here over Southern Alberta!! 
Today when they closed schools due''' ) self.assertEqual(lowerCAmelCase, lowerCAmelCase ) @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' ) lowerCamelCase_ =XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' ) lowerCamelCase_ ='''left''' # use different length sentences to test batching lowerCamelCase_ =[ '''This is an extremelly long sentence that only exists to test the ability of the model to cope with ''' '''left-padding, such as in batched generation. The output for the sequence below should be the same ''' '''regardless of whether left padding is applied or not. When''', '''Hello, my dog is a little''', ] lowerCamelCase_ =tokenizer(lowerCAmelCase, return_tensors='''tf''', padding=lowerCAmelCase ) lowerCamelCase_ =inputs['''input_ids'''] lowerCamelCase_ =model.generate(input_ids=lowerCAmelCase, attention_mask=inputs['''attention_mask'''], max_new_tokens=12 ) lowerCamelCase_ =tokenizer(sentences[0], return_tensors='''tf''' ).input_ids lowerCamelCase_ =model.generate(input_ids=lowerCAmelCase, max_new_tokens=12 ) lowerCamelCase_ =tokenizer(sentences[1], return_tensors='''tf''' ).input_ids lowerCamelCase_ =model.generate(input_ids=lowerCAmelCase, max_new_tokens=12 ) lowerCamelCase_ =tokenizer.batch_decode(lowerCAmelCase, skip_special_tokens=lowerCAmelCase ) lowerCamelCase_ =tokenizer.decode(output_non_padded[0], skip_special_tokens=lowerCAmelCase ) lowerCamelCase_ =tokenizer.decode(output_padded[0], skip_special_tokens=lowerCAmelCase ) lowerCamelCase_ =[ '''This is an extremelly long sentence that only exists to test the ability of the model to cope with ''' '''left-padding, such as in batched generation. The output for the sequence below should be the same ''' '''regardless of whether left padding is applied or not. 
When left padding is applied, the sequence will be ''' '''a single''', '''Hello, my dog is a little bit of a shy one, but he is very friendly''', ] self.assertListEqual(lowerCAmelCase, lowerCAmelCase ) self.assertListEqual(lowerCAmelCase, [non_padded_sentence, padded_sentence] )
75
0
"""Wrapper that exposes timm models as Transformers backbones."""
from typing import Tuple, Union

from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig


if is_timm_available():
    import timm

if is_torch_available():
    from torch import Tensor


class TimmBackbone(PreTrainedModel, BackboneMixin):
    """
    Wrapper class for timm models to be used as backbones. This enables using the timm feature-extraction
    (`features_only`) API while keeping the Transformers backbone interface.
    """

    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config

        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")

        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")

        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())

        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        """Empty init-weights function to ensure compatibility with the rest of the library."""
        pass

    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
347
'''simple docstring''' import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __UpperCamelCase : @staticmethod def lowercase__ ( *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" pass @is_pipeline_test @require_vision @require_torch class __UpperCamelCase ( unittest.TestCase ): lowercase : int =MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =pipeline( '''zero-shot-object-detection''', model='''hf-internal-testing/tiny-random-owlvit-object-detection''' ) lowerCamelCase_ =[ { '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''', '''candidate_labels''': ['''cat''', '''remote''', '''couch'''], } ] return object_detector, examples def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =object_detector(examples[0], threshold=0.0 ) lowerCamelCase_ =len(lowerCAmelCase ) self.assertGreater(lowerCAmelCase, 0 ) self.assertEqual( lowerCAmelCase, [ { '''score''': ANY(lowerCAmelCase ), '''label''': ANY(lowerCAmelCase ), '''box''': {'''xmin''': ANY(lowerCAmelCase ), '''ymin''': ANY(lowerCAmelCase ), '''xmax''': ANY(lowerCAmelCase ), '''ymax''': ANY(lowerCAmelCase )}, } for i in range(lowerCAmelCase ) ], ) @require_tf @unittest.skip('''Zero Shot Object Detection not implemented in TF''' ) def lowercase__ ( self ): """simple docstring""" pass @require_torch def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =pipeline( '''zero-shot-object-detection''', model='''hf-internal-testing/tiny-random-owlvit-object-detection''' ) lowerCamelCase_ =object_detector( 
'''./tests/fixtures/tests_samples/COCO/000000039769.png''', candidate_labels=['''cat''', '''remote''', '''couch'''], threshold=0.6_4, ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=4 ), [ {'''score''': 0.7_2_3_5, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}}, {'''score''': 0.7_2_1_8, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}}, {'''score''': 0.7_1_8_4, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}}, {'''score''': 0.6_7_4_8, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}}, {'''score''': 0.6_6_5_6, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}}, {'''score''': 0.6_6_1_4, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}}, {'''score''': 0.6_4_5_6, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}}, {'''score''': 0.6_4_2, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}}, {'''score''': 0.6_4_1_9, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}}, ], ) lowerCamelCase_ =object_detector( [ { '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''', '''candidate_labels''': ['''cat''', '''remote''', '''couch'''], } ], threshold=0.6_4, ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=4 ), [ [ {'''score''': 0.7_2_3_5, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}}, {'''score''': 0.7_2_1_8, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}}, {'''score''': 0.7_1_8_4, '''label''': '''couch''', '''box''': 
{'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}}, {'''score''': 0.6_7_4_8, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}}, {'''score''': 0.6_6_5_6, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}}, {'''score''': 0.6_6_1_4, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}}, {'''score''': 0.6_4_5_6, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}}, {'''score''': 0.6_4_2, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}}, {'''score''': 0.6_4_1_9, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}}, ] ], ) @require_torch @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =pipeline('''zero-shot-object-detection''' ) lowerCamelCase_ =object_detector( '''http://images.cocodataset.org/val2017/000000039769.jpg''', candidate_labels=['''cat''', '''remote''', '''couch'''], ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=4 ), [ {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}}, {'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}}, {'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}}, {'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}}, ], ) lowerCamelCase_ =object_detector( [ { '''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''', 
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''], }, { '''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''', '''candidate_labels''': ['''cat''', '''remote''', '''couch'''], }, ], ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=4 ), [ [ {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}}, {'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}}, {'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}}, {'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}}, ], [ {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}}, {'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}}, {'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}}, {'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}}, ], ], ) @require_tf @unittest.skip('''Zero Shot Object Detection not implemented in TF''' ) def lowercase__ ( self ): """simple docstring""" pass @require_torch @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =0.2 lowerCamelCase_ =pipeline('''zero-shot-object-detection''' ) lowerCamelCase_ =object_detector( '''http://images.cocodataset.org/val2017/000000039769.jpg''', 
candidate_labels=['''cat''', '''remote''', '''couch'''], threshold=lowerCAmelCase, ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=4 ), [ {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}}, {'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}}, ], ) @require_torch @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =2 lowerCamelCase_ =pipeline('''zero-shot-object-detection''' ) lowerCamelCase_ =object_detector( '''http://images.cocodataset.org/val2017/000000039769.jpg''', candidate_labels=['''cat''', '''remote''', '''couch'''], top_k=lowerCAmelCase, ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=4 ), [ {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}}, ], )
75
0
"""simple docstring""" __a = """2.13.1""" import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse("3.7"): raise ImportWarning( "To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition." ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( "To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n" "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`." ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # isort:skip from datasets.utils import download_manager as _deprecated_download_manager # isort:skip __a = concatenate_datasets __a = DownloadConfig __a = 
DownloadManager __a = DownloadMode __a = DownloadConfig __a = DownloadMode __a = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
66
'''simple docstring''' import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a_ : Optional[int] = logging.get_logger(__name__) a_ : Optional[int] = { """vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_config_file""": """tokenizer_config.json""", } a_ : List[Any] = { """vocab_file""": { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json""" }, """merges_file""": { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt""" }, """tokenizer_config_file""": { """facebook/blenderbot_small-90M""": ( """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json""" ) }, } a_ : Optional[int] = {"""facebook/blenderbot_small-90M""": 5_12} def a_ ( __snake_case : List[Any] ) -> Tuple: """simple docstring""" lowerCamelCase_ =set() lowerCamelCase_ =word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCamelCase_ =char lowerCamelCase_ =set(__snake_case ) return pairs class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Optional[int] =VOCAB_FILES_NAMES lowercase : Tuple =PRETRAINED_VOCAB_FILES_MAP lowercase : Tuple =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : Dict =['input_ids', 'attention_mask'] def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase="__start__", lowerCAmelCase="__end__", lowerCAmelCase="__unk__", lowerCAmelCase="__null__", **lowerCAmelCase, ): """simple docstring""" super().__init__(unk_token=lowerCAmelCase, bos_token=lowerCAmelCase, eos_token=lowerCAmelCase, pad_token=lowerCAmelCase, **lowerCAmelCase ) with open(lowerCAmelCase, encoding='''utf-8''' ) as vocab_handle: lowerCamelCase_ =json.load(lowerCAmelCase ) lowerCamelCase_ ={v: k for k, v in self.encoder.items()} with open(lowerCAmelCase, encoding='''utf-8''' ) as 
merges_handle: lowerCamelCase_ =merges_handle.read().split('''\n''' )[1:-1] lowerCamelCase_ =[tuple(merge.split() ) for merge in merges] lowerCamelCase_ =dict(zip(lowerCAmelCase, range(len(lowerCAmelCase ) ) ) ) lowerCamelCase_ ={} @property def lowercase__ ( self ): """simple docstring""" return len(self.encoder ) def lowercase__ ( self ): """simple docstring""" return dict(self.encoder, **self.added_tokens_encoder ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" if token in self.cache: return self.cache[token] lowerCamelCase_ =re.sub('''([.,!?()])''', R''' \1''', lowerCAmelCase ) lowerCamelCase_ =re.sub('''(\')''', R''' \1 ''', lowerCAmelCase ) lowerCamelCase_ =re.sub(R'''\s{2,}''', ''' ''', lowerCAmelCase ) if "\n" in token: lowerCamelCase_ =token.replace('''\n''', ''' __newln__''' ) lowerCamelCase_ =token.split(''' ''' ) lowerCamelCase_ =[] for token in tokens: if not len(lowerCAmelCase ): continue lowerCamelCase_ =token.lower() lowerCamelCase_ =tuple(lowerCAmelCase ) lowerCamelCase_ =tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) lowerCamelCase_ =get_pairs(lowerCAmelCase ) if not pairs: words.append(lowerCAmelCase ) continue while True: lowerCamelCase_ =min(lowerCAmelCase, key=lambda lowerCAmelCase : self.bpe_ranks.get(lowerCAmelCase, float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break lowerCamelCase_, lowerCamelCase_ =bigram lowerCamelCase_ =[] lowerCamelCase_ =0 while i < len(lowerCAmelCase ): try: lowerCamelCase_ =word.index(lowerCAmelCase, lowerCAmelCase ) new_word.extend(word[i:j] ) lowerCamelCase_ =j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(lowerCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCamelCase_ =tuple(lowerCAmelCase ) lowerCamelCase_ =new_word if len(lowerCAmelCase ) == 1: break else: lowerCamelCase_ =get_pairs(lowerCAmelCase ) lowerCamelCase_ ='''@@ '''.join(lowerCAmelCase ) lowerCamelCase_ 
=word[:-4] lowerCamelCase_ =word words.append(lowerCAmelCase ) return " ".join(lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =[] lowerCamelCase_ =re.findall(R'''\S+\n?''', lowerCAmelCase ) for token in words: split_tokens.extend(list(self.bpe(lowerCAmelCase ).split(''' ''' ) ) ) return split_tokens def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =token.lower() return self.encoder.get(lowerCAmelCase, self.encoder.get(self.unk_token ) ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" return self.decoder.get(lowerCAmelCase, self.unk_token ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =''' '''.join(lowerCAmelCase ).replace('''@@ ''', '''''' ).strip() return out_string def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ): """simple docstring""" if not os.path.isdir(lowerCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCamelCase_ =os.path.join( lowerCAmelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCamelCase_ =os.path.join( lowerCAmelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(lowerCAmelCase, '''w''', encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=lowerCAmelCase, ensure_ascii=lowerCAmelCase ) + '''\n''' ) lowerCamelCase_ =0 with open(lowerCAmelCase, '''w''', encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda lowerCAmelCase : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ''' Please check that the tokenizer is not corrupted!''' ) lowerCamelCase_ =token_index writer.write(''' '''.join(lowerCAmelCase ) + '''\n''' ) index += 1 
return vocab_file, merge_file
75
0
"""Lazy import structure for the XLM model (config, tokenizer, PyTorch and TF models)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Submodule -> public names it exports.  Model submodules are appended below
# only when their backend (torch / tf) is installed, so importing this package
# never pulls in an unavailable framework.
_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch is available: expose the torch model classes lazily.
    _import_structure["modeling_xlm"] = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # TensorFlow is available: expose the TF model classes lazily.
    _import_structure["modeling_tf_xlm"] = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime these never execute.
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )

else:
    import sys

    # Replace this module object in sys.modules with the lazy proxy.  The broken
    # version bound the proxy to a throwaway local name (and referenced an
    # undefined `_import_structure`), which disabled lazy loading entirely.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
141
"""EfficientFormer model configuration."""
from typing import List

from ...configuration_utils import PretrainedConfig
from ...utils import logging


# NOTE(review): identifiers below look machine-mangled — `a_` is bound twice
# (logger, then the URL map, clobbering the logger) and every __init__
# parameter shares the name `lowerCAmelCase` (duplicate parameter names are a
# SyntaxError in Python) while the body reads the presumable original names
# (hidden_act, depths, ...).  Code left byte-identical; restore the original
# identifiers before use.
a_ : Dict = logging.get_logger(__name__)

# Hub checkpoint -> config.json URL for the published EfficientFormer-L1 model.
a_ : Any = {
    """snap-research/efficientformer-l1-300""": (
        """https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
    ),
}


class __UpperCamelCase ( lowerCamelCase__ ):
    """Configuration holding the EfficientFormer architecture hyper-parameters.

    Mirrors the standard Hugging Face ``PretrainedConfig`` pattern: every
    keyword argument is stored verbatim as an instance attribute.
    """

    # Model-type key consumed by the Auto* factories.
    lowercase : List[str] ='efficientformer'

    def __init__( self, lowerCAmelCase = [3, 2, 6, 4], lowerCAmelCase = [48, 96, 224, 448], lowerCAmelCase = [True, True, True, True], lowerCAmelCase = 448, lowerCAmelCase = 32, lowerCAmelCase = 4, lowerCAmelCase = 7, lowerCAmelCase = 5, lowerCAmelCase = 8, lowerCAmelCase = 4, lowerCAmelCase = 0.0, lowerCAmelCase = 16, lowerCAmelCase = 3, lowerCAmelCase = 3, lowerCAmelCase = 3, lowerCAmelCase = 2, lowerCAmelCase = 1, lowerCAmelCase = 0.0, lowerCAmelCase = 1, lowerCAmelCase = True, lowerCAmelCase = True, lowerCAmelCase = 1e-5, lowerCAmelCase = "gelu", lowerCAmelCase = 0.0_2, lowerCAmelCase = 1e-12, lowerCAmelCase = 224, lowerCAmelCase = 1e-05, **lowerCAmelCase, ):
        """Record every architecture hyper-parameter on the config instance."""
        super().__init__(**lowerCAmelCase )
        # Each statement below stores one hyper-parameter; the right-hand
        # names correspond to the original (pre-mangling) parameter names.
        lowerCamelCase_ =hidden_act
        lowerCamelCase_ =hidden_dropout_prob
        lowerCamelCase_ =hidden_sizes
        lowerCamelCase_ =num_hidden_layers
        lowerCamelCase_ =num_attention_heads
        lowerCamelCase_ =initializer_range
        lowerCamelCase_ =layer_norm_eps
        lowerCamelCase_ =patch_size
        lowerCamelCase_ =num_channels
        lowerCamelCase_ =depths
        lowerCamelCase_ =mlp_expansion_ratio
        lowerCamelCase_ =downsamples
        lowerCamelCase_ =dim
        lowerCamelCase_ =key_dim
        lowerCamelCase_ =attention_ratio
        lowerCamelCase_ =resolution
        lowerCamelCase_ =pool_size
        lowerCamelCase_ =downsample_patch_size
        lowerCamelCase_ =downsample_stride
        lowerCamelCase_ =downsample_pad
        lowerCamelCase_ =drop_path_rate
        lowerCamelCase_ =num_metaad_blocks
        lowerCamelCase_ =distillation
        lowerCamelCase_ =use_layer_scale
        lowerCamelCase_ =layer_scale_init_value
        lowerCamelCase_ =image_size
        lowerCamelCase_ =batch_norm_eps
75
0
import pytest

from datasets import inspect_metric, list_metrics, load_metric


# NOTE(review): identifiers in this chunk look machine-mangled — all three
# test-level callables share the name `A_`, parameters are `_lowerCAmelCase`
# (duplicated in the last def, which is a SyntaxError), and bodies reference
# the presumable original names (`monkeypatch`, `tmp_path`, `__snake_case`).
# Code left byte-identical; restore the original identifiers before running.
@pytest.fixture
def A_ ( _lowerCAmelCase ) -> int:
    # Reset the set of already-emitted deprecation warnings so every test sees
    # the deprecation FutureWarning fresh.  The parameter is presumably
    # pytest's `monkeypatch` fixture — TODO confirm (body uses `monkeypatch`).
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() )


@pytest.fixture
def A_ ( _lowerCAmelCase ) -> Union[str, Any]:
    # Stub `datasets.inspect.huggingface_hub` with a fake hub exposing a fixed
    # metric list, so no network access happens during the tests.
    class A__ :
        def __init__( self , A_ ):
            """Fake hub metric entry; stores only the metric id."""
            UpperCamelCase : Any = metric_id

    class A__ :
        # Canned list of metrics returned by the fake hub.
        _UpperCAmelCase :Dict = [MetricMock(lowerCamelCase__ ) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']]

        def __UpperCamelCase( self ):
            """Return the canned metric list (mimics the hub's list_metrics)."""
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() )


@pytest.mark.parametrize(
    "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
    # Each metric entry point should emit the "metrics are deprecated, use
    # `evaluate` instead" warning pointing at the evaluate documentation.
    if "tmp_path" in args:
        # Substitute the "tmp_path" placeholder with the real tmp_path fixture.
        UpperCamelCase : Dict = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
    with pytest.warns(__snake_case , match="https://huggingface.co/docs/evaluate" ):
        func(*__snake_case )
52
'''simple docstring''' import itertools import random import unittest import numpy as np from transformers import is_speech_available from transformers.testing_utils import require_torch, require_torchaudio from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import SpeechaTextFeatureExtractor a_ : Union[str, Any] = random.Random() def a_ ( __snake_case : int , __snake_case : int=1.0 , __snake_case : Tuple=None , __snake_case : Union[str, Any]=None ) -> str: """simple docstring""" if rng is None: lowerCamelCase_ =global_rng lowerCamelCase_ =[] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class __UpperCamelCase ( unittest.TestCase ): def __init__( self, lowerCAmelCase, lowerCAmelCase=7, lowerCAmelCase=400, lowerCAmelCase=2_000, lowerCAmelCase=24, lowerCAmelCase=24, lowerCAmelCase=0.0, lowerCAmelCase=16_000, lowerCAmelCase=True, lowerCAmelCase=True, ): """simple docstring""" lowerCamelCase_ =parent lowerCamelCase_ =batch_size lowerCamelCase_ =min_seq_length lowerCamelCase_ =max_seq_length lowerCamelCase_ =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) lowerCamelCase_ =feature_size lowerCamelCase_ =num_mel_bins lowerCamelCase_ =padding_value lowerCamelCase_ =sampling_rate lowerCamelCase_ =return_attention_mask lowerCamelCase_ =do_normalize def lowercase__ ( self ): """simple docstring""" return { "feature_size": self.feature_size, "num_mel_bins": self.num_mel_bins, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def lowercase__ ( self, lowerCAmelCase=False, lowerCAmelCase=False ): """simple docstring""" def _flatten(lowerCAmelCase ): return list(itertools.chain(*lowerCAmelCase ) ) if equal_length: lowerCamelCase_ 
=[floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size lowerCamelCase_ =[ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff ) ] if numpify: lowerCamelCase_ =[np.asarray(lowerCAmelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ): lowercase : Any =SpeechaTextFeatureExtractor if is_speech_available() else None def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =SpeechaTextFeatureExtractionTester(self ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" self.assertTrue(np.all(np.mean(lowerCAmelCase, axis=0 ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(lowerCAmelCase, axis=0 ) - 1 ) < 1e-3 ) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 lowerCamelCase_ =[floats_list((1, x) )[0] for x in range(800, 1_400, 200 )] lowerCamelCase_ =[np.asarray(lowerCAmelCase ) for speech_input in speech_inputs] # Test feature size lowerCamelCase_ =feature_extractor(lowerCAmelCase, padding=lowerCAmelCase, return_tensors='''np''' ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size ) # Test not batched input lowerCamelCase_ =feature_extractor(speech_inputs[0], return_tensors='''np''' ).input_features lowerCamelCase_ =feature_extractor(np_speech_inputs[0], return_tensors='''np''' ).input_features self.assertTrue(np.allclose(lowerCAmelCase, lowerCAmelCase, atol=1e-3 ) ) # Test batched lowerCamelCase_ =feature_extractor(lowerCAmelCase, return_tensors='''np''' ).input_features lowerCamelCase_ =feature_extractor(lowerCAmelCase, return_tensors='''np''' ).input_features for enc_seq_a, 
enc_seq_a in zip(lowerCAmelCase, lowerCAmelCase ): self.assertTrue(np.allclose(lowerCAmelCase, lowerCAmelCase, atol=1e-3 ) ) # Test 2-D numpy arrays are batched. lowerCamelCase_ =[floats_list((1, x) )[0] for x in (800, 800, 800)] lowerCamelCase_ =np.asarray(lowerCAmelCase ) lowerCamelCase_ =feature_extractor(lowerCAmelCase, return_tensors='''np''' ).input_features lowerCamelCase_ =feature_extractor(lowerCAmelCase, return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(lowerCAmelCase, lowerCAmelCase ): self.assertTrue(np.allclose(lowerCAmelCase, lowerCAmelCase, atol=1e-3 ) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase_ =[floats_list((1, x) )[0] for x in range(800, 1_400, 200 )] lowerCamelCase_ =['''longest''', '''max_length''', '''do_not_pad'''] lowerCamelCase_ =[None, 16, None] for max_length, padding in zip(lowerCAmelCase, lowerCAmelCase ): lowerCamelCase_ =feature_extractor( lowerCAmelCase, padding=lowerCAmelCase, max_length=lowerCAmelCase, return_attention_mask=lowerCAmelCase ) lowerCamelCase_ =inputs.input_features lowerCamelCase_ =inputs.attention_mask lowerCamelCase_ =[np.sum(lowerCAmelCase ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase_ =[floats_list((1, x) )[0] for x in range(800, 1_400, 200 )] lowerCamelCase_ =['''longest''', '''max_length''', '''do_not_pad'''] lowerCamelCase_ =[None, 16, None] for max_length, padding in zip(lowerCAmelCase, lowerCAmelCase ): lowerCamelCase_ =feature_extractor( lowerCAmelCase, 
max_length=lowerCAmelCase, padding=lowerCAmelCase, return_tensors='''np''', return_attention_mask=lowerCAmelCase ) lowerCamelCase_ =inputs.input_features lowerCamelCase_ =inputs.attention_mask lowerCamelCase_ =[np.sum(lowerCAmelCase ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase_ =[floats_list((1, x) )[0] for x in range(800, 1_400, 200 )] lowerCamelCase_ =feature_extractor( lowerCAmelCase, padding='''max_length''', max_length=4, truncation=lowerCAmelCase, return_tensors='''np''', return_attention_mask=lowerCAmelCase, ) lowerCamelCase_ =inputs.input_features lowerCamelCase_ =inputs.attention_mask lowerCamelCase_ =np.sum(attention_mask == 1, axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1] ) self._check_zero_mean_unit_variance(input_features[2] ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase_ =[floats_list((1, x) )[0] for x in range(800, 1_400, 200 )] lowerCamelCase_ =feature_extractor( lowerCAmelCase, padding='''longest''', max_length=4, truncation=lowerCAmelCase, return_tensors='''np''', return_attention_mask=lowerCAmelCase, ) lowerCamelCase_ =inputs.input_features lowerCamelCase_ =inputs.attention_mask lowerCamelCase_ =np.sum(attention_mask == 1, axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : 
fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape, (3, 4, 24) ) lowerCamelCase_ =[floats_list((1, x) )[0] for x in range(800, 1_400, 200 )] lowerCamelCase_ =feature_extractor( lowerCAmelCase, padding='''longest''', max_length=16, truncation=lowerCAmelCase, return_tensors='''np''', return_attention_mask=lowerCAmelCase, ) lowerCamelCase_ =inputs.input_features lowerCamelCase_ =inputs.attention_mask lowerCamelCase_ =np.sum(attention_mask == 1, axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape, (3, 6, 24) ) def lowercase__ ( self ): """simple docstring""" import torch lowerCamelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase_ =np.random.rand(100, 32 ).astype(np.floataa ) lowerCamelCase_ =np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: lowerCamelCase_ =feature_extractor.pad([{'''input_features''': inputs}], return_tensors='''np''' ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) lowerCamelCase_ =feature_extractor.pad([{'''input_features''': inputs}], return_tensors='''pt''' ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" from datasets import load_dataset lowerCamelCase_ =load_dataset('''hf-internal-testing/librispeech_asr_dummy''', '''clean''', split='''validation''' ) # automatic decoding with librispeech lowerCamelCase_ =ds.sort('''id''' ).select(range(lowerCAmelCase ) 
)[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =np.array([ -1.5_7_4_5, -1.7_7_1_3, -1.7_0_2_0, -1.6_0_6_9, -1.2_2_5_0, -1.1_1_0_5, -0.9_0_7_2, -0.8_2_4_1, -1.2_3_1_0, -0.8_0_9_8, -0.3_3_2_0, -0.4_1_0_1, -0.7_9_8_5, -0.4_9_9_6, -0.8_2_1_3, -0.9_1_2_8, -1.0_4_2_0, -1.1_2_8_6, -1.0_4_4_0, -0.7_9_9_9, -0.8_4_0_5, -1.2_2_7_5, -1.5_4_4_3, -1.4_6_2_5, ] ) # fmt: on lowerCamelCase_ =self._load_datasamples(1 ) lowerCamelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase_ =feature_extractor(lowerCAmelCase, return_tensors='''pt''' ).input_features self.assertEquals(input_features.shape, (1, 584, 24) ) self.assertTrue(np.allclose(input_features[0, 0, :30], lowerCAmelCase, atol=1e-4 ) )
75
0
from __future__ import annotations

import math


def minimax(
    depth: int, node_index: int, is_max: bool, scores: list[int], height: float
) -> int:
    """Return the optimal value of a complete binary game tree via minimax.

    Args:
        depth: current depth in the tree (the root is at depth 0).
        node_index: index of the current node within its level.
        is_max: True if the maximizing player moves at this node.
        scores: leaf values of the complete binary game tree.
        height: depth at which the leaves live (``math.log2(len(scores))``).

    Raises:
        ValueError: if ``depth`` is negative or ``scores`` is empty.
    """
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    # Leaf level reached: the node's value is its stored score.
    if depth == height:
        return scores[node_index]
    # Players alternate: children of a max node are min nodes and vice versa.
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    """Demo: print the optimal value for a fixed game tree."""
    scores = [90, 23, 6, 33, 21, 65, 123, 3_4423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
207
'''simple docstring''' def a_ ( __snake_case : Any , __snake_case : List[str] ) -> str: """simple docstring""" lowerCamelCase_ ='''''' for i in table: res += inp[i - 1] return res def a_ ( __snake_case : List[str] ) -> Optional[int]: """simple docstring""" return data[1:] + data[0] def a_ ( __snake_case : str , __snake_case : Tuple ) -> int: """simple docstring""" lowerCamelCase_ ='''''' for i in range(len(__snake_case ) ): if a[i] == b[i]: res += "0" else: res += "1" return res def a_ ( __snake_case : Optional[Any] , __snake_case : Tuple ) -> List[Any]: """simple docstring""" lowerCamelCase_ =int('''0b''' + data[0] + data[-1] , 2 ) lowerCamelCase_ =int('''0b''' + data[1:3] , 2 ) return bin(s[row][col] )[2:] def a_ ( __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : int , __snake_case : Tuple , __snake_case : List[Any] ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ =message[:4] lowerCamelCase_ =message[4:] lowerCamelCase_ =apply_table(__snake_case , __snake_case ) lowerCamelCase_ =xor(__snake_case , __snake_case ) lowerCamelCase_ =apply_sbox(__snake_case , temp[:4] ) # noqa: E741 lowerCamelCase_ =apply_sbox(__snake_case , temp[4:] ) lowerCamelCase_ ='''0''' * (2 - len(__snake_case )) + l # noqa: E741 lowerCamelCase_ ='''0''' * (2 - len(__snake_case )) + r lowerCamelCase_ =apply_table(l + r , __snake_case ) lowerCamelCase_ =xor(__snake_case , __snake_case ) return temp + right if __name__ == "__main__": a_ : Any = input("""Enter 10 bit key: """) a_ : Any = input("""Enter 8 bit message: """) a_ : str = [6, 3, 7, 4, 8, 5, 10, 9] a_ : str = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6] a_ : str = [2, 4, 3, 1] a_ : Optional[int] = [2, 6, 3, 1, 4, 8, 5, 7] a_ : Optional[Any] = [4, 1, 3, 5, 7, 2, 8, 6] a_ : Union[str, Any] = [4, 1, 2, 3, 2, 3, 4, 1] a_ : int = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]] a_ : Any = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]] # key generation a_ : List[Any] = apply_table(key, paa_table) a_ : 
str = temp[:5] a_ : Optional[Any] = temp[5:] a_ : Tuple = left_shift(left) a_ : Optional[Any] = left_shift(right) a_ : str = apply_table(left + right, pa_table) a_ : Optional[Any] = left_shift(left) a_ : Tuple = left_shift(right) a_ : Union[str, Any] = left_shift(left) a_ : List[str] = left_shift(right) a_ : Optional[int] = apply_table(left + right, pa_table) # encryption a_ : Optional[int] = apply_table(message, IP) a_ : List[Any] = function(expansion, sa, sa, keya, temp) a_ : str = temp[4:] + temp[:4] a_ : List[str] = function(expansion, sa, sa, keya, temp) a_ : Union[str, Any] = apply_table(temp, IP_inv) print("""Cipher text is:""", CT) # decryption a_ : Optional[int] = apply_table(CT, IP) a_ : List[Any] = function(expansion, sa, sa, keya, temp) a_ : int = temp[4:] + temp[:4] a_ : int = function(expansion, sa, sa, keya, temp) a_ : Optional[int] = apply_table(temp, IP_inv) print("""Plain text after decypting is:""", PT)
75
0
from typing import Any


class Node:
    """A single element of the singly linked list."""

    def __init__(self, data: Any):
        self.data = data
        self.next = None  # pointer to the next Node, or None at the tail

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    """Singly linked list supporting insertion, deletion, indexing and reversal."""

    def __init__(self):
        self.head = None

    def __iter__(self):
        # Yields the *data* stored in each node, front to back.
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        """Return the data at ``index``; raises ValueError when out of range."""
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        """Overwrite the data at ``index``; raises ValueError when out of range."""
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        """Insert ``data`` so it becomes the element at position ``index``."""
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        """Remove and return the data at position ``index``."""
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        """Reverse the list in place by re-pointing each node's ``next``."""
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # prev ends on the old tail, which is the new head
        self.head = prev


def test_singly_linked_list() -> None:
    """Exercise the basic LinkedList operations with integer payloads."""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))


def test_singly_linked_list_2() -> None:
    """Exercise the LinkedList with mixed payload types, including Nodes and None."""
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main() -> None:
    """Interactive demo driven by stdin input."""
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
275
"""AutoFeatureExtractor class: resolves and instantiates the right feature extractor."""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union

# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
    CONFIG_MAPPING_NAMES,
    AutoConfig,
    model_type_to_module_name,
    replace_list_option_in_docstrings,
)

logger = logging.get_logger(__name__)

# Maps a model type (config `model_type`) to the name of its feature-extractor class.
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
    [
        ("audio-spectrogram-transformer", "ASTFeatureExtractor"),
        ("beit", "BeitFeatureExtractor"),
        ("chinese_clip", "ChineseCLIPFeatureExtractor"),
        ("clap", "ClapFeatureExtractor"),
        ("clip", "CLIPFeatureExtractor"),
        ("clipseg", "ViTFeatureExtractor"),
        ("conditional_detr", "ConditionalDetrFeatureExtractor"),
        ("convnext", "ConvNextFeatureExtractor"),
        ("cvt", "ConvNextFeatureExtractor"),
        ("data2vec-audio", "Wav2Vec2FeatureExtractor"),
        ("data2vec-vision", "BeitFeatureExtractor"),
        ("deformable_detr", "DeformableDetrFeatureExtractor"),
        ("deit", "DeiTFeatureExtractor"),
        ("detr", "DetrFeatureExtractor"),
        ("dinat", "ViTFeatureExtractor"),
        ("donut-swin", "DonutFeatureExtractor"),
        ("dpt", "DPTFeatureExtractor"),
        ("encodec", "EncodecFeatureExtractor"),
        ("flava", "FlavaFeatureExtractor"),
        ("glpn", "GLPNFeatureExtractor"),
        ("groupvit", "CLIPFeatureExtractor"),
        ("hubert", "Wav2Vec2FeatureExtractor"),
        ("imagegpt", "ImageGPTFeatureExtractor"),
        ("layoutlmv2", "LayoutLMv2FeatureExtractor"),
        ("layoutlmv3", "LayoutLMv3FeatureExtractor"),
        ("levit", "LevitFeatureExtractor"),
        ("maskformer", "MaskFormerFeatureExtractor"),
        ("mctct", "MCTCTFeatureExtractor"),
        ("mobilenet_v1", "MobileNetV1FeatureExtractor"),
        ("mobilenet_v2", "MobileNetV2FeatureExtractor"),
        ("mobilevit", "MobileViTFeatureExtractor"),
        ("nat", "ViTFeatureExtractor"),
        ("owlvit", "OwlViTFeatureExtractor"),
        ("perceiver", "PerceiverFeatureExtractor"),
        ("poolformer", "PoolFormerFeatureExtractor"),
        ("regnet", "ConvNextFeatureExtractor"),
        ("resnet", "ConvNextFeatureExtractor"),
        ("segformer", "SegformerFeatureExtractor"),
        ("sew", "Wav2Vec2FeatureExtractor"),
        ("sew-d", "Wav2Vec2FeatureExtractor"),
        ("speech_to_text", "Speech2TextFeatureExtractor"),
        ("speecht5", "SpeechT5FeatureExtractor"),
        ("swiftformer", "ViTFeatureExtractor"),
        ("swin", "ViTFeatureExtractor"),
        ("swinv2", "ViTFeatureExtractor"),
        ("table-transformer", "DetrFeatureExtractor"),
        ("timesformer", "VideoMAEFeatureExtractor"),
        ("tvlt", "TvltFeatureExtractor"),
        ("unispeech", "Wav2Vec2FeatureExtractor"),
        ("unispeech-sat", "Wav2Vec2FeatureExtractor"),
        ("van", "ConvNextFeatureExtractor"),
        ("videomae", "VideoMAEFeatureExtractor"),
        ("vilt", "ViltFeatureExtractor"),
        ("vit", "ViTFeatureExtractor"),
        ("vit_mae", "ViTFeatureExtractor"),
        ("vit_msn", "ViTFeatureExtractor"),
        ("wav2vec2", "Wav2Vec2FeatureExtractor"),
        ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
        ("wavlm", "Wav2Vec2FeatureExtractor"),
        ("whisper", "WhisperFeatureExtractor"),
        ("xclip", "CLIPFeatureExtractor"),
        ("yolos", "YolosFeatureExtractor"),
    ]
)

FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)


def feature_extractor_class_from_name(class_name: str):
    """Resolve a feature-extractor class object from its bare class name.

    Searches the static name mapping first, then any extractors registered at
    runtime, and finally falls back to the main ``transformers`` module (which
    exposes dummy objects carrying a helpful error when a dependency is missing).
    Returns None when nothing matches.
    """
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None


def get_feature_extractor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Load the feature-extractor configuration dict from a local dir or the Hub.

    Returns an empty dict when no feature-extractor config file can be located,
    so callers can fall back to the model config.
    """
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)


class AutoFeatureExtractor:
    """Factory class: `from_pretrained` instantiates the matching feature extractor.

    This class cannot be instantiated directly.
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate the feature-extractor class matching the checkpoint.

        Resolution order: the `feature_extractor_type` key of the feature-extractor
        config, a remote-code `auto_map` entry (when trusted), the model config's
        `feature_extractor_type`, and finally the config-class -> extractor mapping.

        Raises:
            ValueError: if no feature-extractor class can be determined.
        """
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new (config class, feature-extractor class) pair at runtime."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
75
0
"""DPT model configuration."""
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig

logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}


class DPTConfig(PretrainedConfig):
    """Configuration for a DPT (Dense Prediction Transformer) model.

    Stores the ViT backbone hyper-parameters, the reassemble/fusion neck
    settings and the auxiliary semantic-segmentation head options. In hybrid
    mode a BiT convolutional backbone configuration is embedded as well.
    """

    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                # Default to a BiT backbone when none is provided.
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        """Serialize this instance to a plain dict (overrides PretrainedConfig).

        The nested backbone config is recursively serialized, and the
        `model_type` marker is included for round-tripping.
        """
        output = copy.deepcopy(self.__dict__)

        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()

        output["model_type"] = self.__class__.model_type
        return output
340
"""Fine-tune OpenAI GPT with a double-heads model on the ROCStories dataset."""
import argparse
import csv
import logging
import os
import random

import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange

from transformers import (
    CONFIG_NAME,
    WEIGHTS_NAME,
    AdamW,
    OpenAIGPTDoubleHeadsModel,
    OpenAIGPTTokenizer,
    get_linear_schedule_with_warmup,
)


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def accuracy(out, labels):
    """Count how many argmax predictions in ``out`` match ``labels``."""
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Parse a ROCStories CSV into (story, continuation1, continuation2, label) tuples.

    The label column is 1-indexed in the file and converted to 0-indexed here.
    """
    with open(dataset_path, encoding="utf_8") as f:
        reader = csv.reader(f)
        output = []
        next(reader)  # skip the first line
        for line in tqdm(reader):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output


def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Build padded tensors for the double-heads model from token-id datasets.

    Each example yields two candidate sequences of the form
    ``[start] story [delimiter] continuation [clf]``; LM labels mirror the
    input ids with -100 padding so padded positions are ignored by the loss.
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            # The multiple-choice head reads the hidden state at the [clf] token.
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets


def main():
    """Parse CLI args, then run training and/or evaluation."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help=(
            "If > 0: set total number of training steps to perform. Override num_train_epochs."
        ),
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        # Recursively convert strings to token ids; ints (labels) pass through.
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                # Exponential moving average of the loss, for the progress bar.
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))


if __name__ == "__main__":
    main()
75
0
'''simple docstring''' import re import tempfile from pathlib import Path import pytest import yaml from datasets.utils.readme import ReadMe # @pytest.fixture # def example_yaml_structure(): A__ : List[str] =yaml.safe_load( '''\ name: \"\" allow_empty: false allow_empty_text: true subsections: - name: \"Dataset Card for X\" # First-level markdown heading allow_empty: false allow_empty_text: true subsections: - name: \"Table of Contents\" allow_empty: false allow_empty_text: false subsections: null - name: \"Dataset Description\" allow_empty: false allow_empty_text: false subsections: - name: \"Dataset Summary\" allow_empty: false allow_empty_text: false subsections: null - name: \"Supported Tasks and Leaderboards\" allow_empty: true allow_empty_text: true subsections: null - name: Languages allow_empty: false allow_empty_text: true subsections: null ''' ) A__ : int ={ """name""": """root""", """text""": """""", """is_empty_text""": True, """subsections""": [ { """name""": """Dataset Card for My Dataset""", """text""": """""", """is_empty_text""": True, """subsections""": [ {"""name""": """Table of Contents""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": []}, { """name""": """Dataset Description""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": [ { """name""": """Dataset Summary""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": [], }, { """name""": """Supported Tasks and Leaderboards""", """text""": """""", """is_empty_text""": True, """subsections""": [], }, {"""name""": """Languages""", """text""": """Language Text""", """is_empty_text""": False, """subsections""": []}, ], }, ], } ], } A__ : List[str] ="""\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. 
### Supported Tasks and Leaderboards ### Languages Language Text """ A__ : str ="""\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. #### Extra Ignored Subsection ### Supported Tasks and Leaderboards ### Languages Language Text """ A__ : List[str] ={ """name""": """root""", """text""": """""", """is_empty_text""": True, """subsections""": [ { """name""": """Dataset Card for My Dataset""", """text""": """""", """is_empty_text""": True, """subsections""": [ {"""name""": """Table of Contents""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": []}, { """name""": """Dataset Description""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": [ { """name""": """Dataset Summary""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": [ { """name""": """Extra Ignored Subsection""", """text""": """""", """is_empty_text""": True, """subsections""": [], } ], }, { """name""": """Supported Tasks and Leaderboards""", """text""": """""", """is_empty_text""": True, """subsections""": [], }, {"""name""": """Languages""", """text""": """Language Text""", """is_empty_text""": False, """subsections""": []}, ], }, ], } ], } A__ : str ="""\ --- --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text """ A__ : Tuple =( """The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.""" ) A__ : List[Any] ="""\ # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. 
### Supported Tasks and Leaderboards ### Languages Language Text """ A__ : Dict =( """The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.""" ) A__ : List[str] ="""\ --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text """ A__ : str ="""The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.""" A__ : Optional[Any] ="""\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary ### Supported Tasks and Leaderboards ### Languages Language Text """ A__ : int ="""The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).""" A__ : Optional[Any] ="""\ --- language: - zh - en --- # Dataset Card for My Dataset """ A__ : List[Any] ="""The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'.""" A__ : Any ="""\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Languages Language Text """ A__ : Optional[Any] ="""The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.""" A__ : int ="""\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. 
## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages """ A__ : str ="""The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.""" A__ : int ="""\ --- language: - zh - en --- ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text """ A__ : List[Any] ="""The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.""" A__ : int ="""\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text # Dataset Card My Dataset """ A__ : Optional[Any] ="""The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.""" A__ : Dict ="""\ --- language: - zh - en --- # Dataset Card My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text """ A__ : List[Any] ="""The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.""" A__ : Optional[Any] ="""""" A__ : Tuple ="""The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. 
Skipping further validation for this README.\n-\tNo YAML markers are present in the README.""" A__ : Optional[Any] ="""\ --- language: - zh - en --- # Dataset Card for My Dataset # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text """ A__ : Dict ="""The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.""" @pytest.mark.parametrize( """readme_md, expected_dict""" , [ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] , ) def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ): """simple docstring""" assert ReadMe.from_string(__snake_case , __snake_case ).to_dict() == expected_dict @pytest.mark.parametrize( """readme_md, expected_error""" , [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] , ) def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ): """simple docstring""" with pytest.raises(__snake_case , match=re.escape(expected_error.format(path="""root""" ) ) ): _lowerCAmelCase = ReadMe.from_string(__snake_case , __snake_case ) readme.validate() @pytest.mark.parametrize( """readme_md, 
expected_error""" , [ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] , ) def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ): """simple docstring""" with pytest.raises(__snake_case , match=re.escape(expected_error.format(path="""root""" ) ) ): ReadMe.from_string(__snake_case , __snake_case ) @pytest.mark.parametrize( """readme_md,""" , [ (README_MULTIPLE_SAME_HEADING_1), ] , ) def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" ReadMe.from_string(__snake_case , __snake_case , suppress_parsing_errors=__snake_case ) @pytest.mark.parametrize( """readme_md, expected_dict""" , [ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] , ) def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: _lowerCAmelCase = Path(__snake_case ) / """README.md""" with open(__snake_case , """w+""" ) as readme_file: readme_file.write(__snake_case ) _lowerCAmelCase = ReadMe.from_readme(__snake_case , __snake_case ).to_dict() assert out["name"] == path assert out["text"] == "" assert out["is_empty_text"] assert out["subsections"] == expected_dict["subsections"] @pytest.mark.parametrize( """readme_md, expected_error""" , [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] , ) def UpperCamelCase__ 
( lowerCAmelCase , lowerCAmelCase ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: _lowerCAmelCase = Path(__snake_case ) / """README.md""" with open(__snake_case , """w+""" ) as readme_file: readme_file.write(__snake_case ) _lowerCAmelCase = expected_error.format(path=__snake_case ) with pytest.raises(__snake_case , match=re.escape(__snake_case ) ): _lowerCAmelCase = ReadMe.from_readme(__snake_case , __snake_case ) readme.validate() @pytest.mark.parametrize( """readme_md, expected_error""" , [ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] , ) def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: _lowerCAmelCase = Path(__snake_case ) / """README.md""" with open(__snake_case , """w+""" ) as readme_file: readme_file.write(__snake_case ) _lowerCAmelCase = expected_error.format(path=__snake_case ) with pytest.raises(__snake_case , match=re.escape(__snake_case ) ): ReadMe.from_readme(__snake_case , __snake_case ) @pytest.mark.parametrize( """readme_md,""" , [ (README_MULTIPLE_SAME_HEADING_1), ] , ) def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: _lowerCAmelCase = Path(__snake_case ) / """README.md""" with open(__snake_case , """w+""" ) as readme_file: readme_file.write(__snake_case ) ReadMe.from_readme(__snake_case , __snake_case , suppress_parsing_errors=__snake_case )
70
'''simple docstring''' import copy import os import cva import numpy as np from matplotlib import pyplot as plt class __UpperCamelCase : def __init__( self ): """simple docstring""" lowerCamelCase_ ='''''' lowerCamelCase_ ='''''' lowerCamelCase_ =[] lowerCamelCase_ =0 lowerCamelCase_ =256 lowerCamelCase_ =0 lowerCamelCase_ =0 lowerCamelCase_ =0 lowerCamelCase_ =0 def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =cva.imread(lowerCAmelCase, 0 ) lowerCamelCase_ =copy.deepcopy(self.img ) lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =plt.hist(self.img.ravel(), 256, [0, 256], label='''x''' ) lowerCamelCase_ =np.sum(lowerCAmelCase ) for i in range(len(lowerCAmelCase ) ): lowerCamelCase_ =x[i] / self.k self.sk += prk lowerCamelCase_ =(self.L - 1) * self.sk if self.rem != 0: lowerCamelCase_ =int(last % last ) lowerCamelCase_ =int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(lowerCAmelCase ) lowerCamelCase_ =int(np.ma.count(self.img ) / self.img[1].size ) lowerCamelCase_ =self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): lowerCamelCase_ =self.img[j][i] if num != self.last_list[num]: lowerCamelCase_ =self.last_list[num] cva.imwrite('''output_data/output.jpg''', self.img ) def lowercase__ ( self ): """simple docstring""" plt.hist(self.img.ravel(), 256, [0, 256] ) def lowercase__ ( self ): """simple docstring""" cva.imshow('''Output-Image''', self.img ) cva.imshow('''Input-Image''', self.original_image ) cva.waitKey(5_000 ) cva.destroyAllWindows() if __name__ == "__main__": a_ : str = os.path.join(os.path.basename(__file__), """image_data/input.jpg""") a_ : Optional[Any] = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
75
0
"""simple docstring""" def a__ ( snake_case__ ) -> bool: if not isinstance(__snake_case , __snake_case ): raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" ) if len(__snake_case ) == 0: raise ValueError("""Input list must be a non empty list""" ) if len(__snake_case ) == 1: return True lowerCamelCase = series[1] - series[0] for index in range(len(__snake_case ) - 1 ): if series[index + 1] - series[index] != common_diff: return False return True def a__ ( snake_case__ ) -> float: if not isinstance(__snake_case , __snake_case ): raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" ) if len(__snake_case ) == 0: raise ValueError("""Input list must be a non empty list""" ) lowerCamelCase = 0 for val in series: answer += val return answer / len(__snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
291
'''simple docstring''' from collections import UserDict from typing import Union import numpy as np import requests from ..utils import ( add_end_docstrings, logging, ) from .audio_classification import ffmpeg_read from .base import PIPELINE_INIT_ARGS, Pipeline a_ : Any = logging.get_logger(__name__) @add_end_docstrings(lowerCamelCase__ ) class __UpperCamelCase ( lowerCamelCase__ ): def __init__( self, **lowerCAmelCase ): """simple docstring""" super().__init__(**lowerCAmelCase ) if self.framework != "pt": raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' ) # No specific FOR_XXX available yet def __call__( self, lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" return super().__call__(lowerCAmelCase, **lowerCAmelCase ) def lowercase__ ( self, **lowerCAmelCase ): """simple docstring""" lowerCamelCase_ ={} if "candidate_labels" in kwargs: lowerCamelCase_ =kwargs['''candidate_labels'''] if "hypothesis_template" in kwargs: lowerCamelCase_ =kwargs['''hypothesis_template'''] return preprocess_params, {}, {} def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None, lowerCAmelCase="This is a sound of {}." 
): """simple docstring""" if isinstance(lowerCAmelCase, lowerCAmelCase ): if audio.startswith('''http://''' ) or audio.startswith('''https://''' ): # We need to actually check for a real protocol, otherwise it's impossible to use a local file # like http_huggingface_co.png lowerCamelCase_ =requests.get(lowerCAmelCase ).content else: with open(lowerCAmelCase, '''rb''' ) as f: lowerCamelCase_ =f.read() if isinstance(lowerCAmelCase, lowerCAmelCase ): lowerCamelCase_ =ffmpeg_read(lowerCAmelCase, self.feature_extractor.sampling_rate ) if not isinstance(lowerCAmelCase, np.ndarray ): raise ValueError('''We expect a numpy ndarray as input''' ) if len(audio.shape ) != 1: raise ValueError('''We expect a single channel audio input for ZeroShotAudioClassificationPipeline''' ) lowerCamelCase_ =self.feature_extractor( [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors='''pt''' ) lowerCamelCase_ =candidate_labels lowerCamelCase_ =[hypothesis_template.format(lowerCAmelCase ) for x in candidate_labels] lowerCamelCase_ =self.tokenizer(lowerCAmelCase, return_tensors=self.framework, padding=lowerCAmelCase ) lowerCamelCase_ =[text_inputs] return inputs def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =model_inputs.pop('''candidate_labels''' ) lowerCamelCase_ =model_inputs.pop('''text_inputs''' ) if isinstance(text_inputs[0], lowerCAmelCase ): lowerCamelCase_ =text_inputs[0] else: # Batching case. 
lowerCamelCase_ =text_inputs[0][0] lowerCamelCase_ =self.model(**lowerCAmelCase, **lowerCAmelCase ) lowerCamelCase_ ={ '''candidate_labels''': candidate_labels, '''logits''': outputs.logits_per_audio, } return model_outputs def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =model_outputs.pop('''candidate_labels''' ) lowerCamelCase_ =model_outputs['''logits'''][0] if self.framework == "pt": lowerCamelCase_ =logits.softmax(dim=0 ) lowerCamelCase_ =probs.tolist() else: raise ValueError('''`tf` framework not supported.''' ) lowerCamelCase_ =[ {'''score''': score, '''label''': candidate_label} for score, candidate_label in sorted(zip(lowerCAmelCase, lowerCAmelCase ), key=lambda lowerCAmelCase : -x[0] ) ] return result
75
0
"""simple docstring""" import itertools import math def lowerCAmelCase__ ( _UpperCamelCase : int ) -> bool: """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(__snake_case ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowerCAmelCase__ ( ) -> List[str]: """simple docstring""" snake_case = 2 while True: if is_prime(__snake_case ): yield num num += 1 def lowerCAmelCase__ ( _UpperCamelCase : int = 1_0_0_0_1 ) -> int: """simple docstring""" return next(itertools.islice(prime_generator() , nth - 1 , __snake_case ) ) if __name__ == "__main__": print(f"""{solution() = }""")
150
'''simple docstring''' import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin a_ : str = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_sentencepiece_available(): import sentencepiece as sp a_ : Optional[Any] = 5 a_ : str = 10 @require_sentencepiece @require_tokenizers class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ): lowercase : int =SpeechaTextTokenizer lowercase : int =False lowercase : List[str] =True def lowercase__ ( self ): """simple docstring""" super().setUp() lowerCamelCase_ =sp.SentencePieceProcessor() spm_model.Load(lowerCAmelCase ) lowerCamelCase_ =['''<s>''', '''<pad>''', '''</s>''', '''<unk>'''] vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(lowerCAmelCase ) )] lowerCamelCase_ =dict(zip(lowerCAmelCase, range(len(lowerCAmelCase ) ) ) ) lowerCamelCase_ =Path(self.tmpdirname ) save_json(lowerCAmelCase, save_dir / VOCAB_FILES_NAMES['''vocab_file'''] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(lowerCAmelCase, save_dir / VOCAB_FILES_NAMES['''spm_file'''] ) lowerCamelCase_ =SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''<pad>''' lowerCamelCase_ =1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase ), lowerCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase ), lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =list(self.get_tokenizer().get_vocab().keys() ) 
self.assertEqual(vocab_keys[0], '''<s>''' ) self.assertEqual(vocab_keys[1], '''<pad>''' ) self.assertEqual(vocab_keys[-1], '''j''' ) self.assertEqual(len(lowerCAmelCase ), 1_001 ) def lowercase__ ( self ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size, 1_001 ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) lowerCamelCase_ =tokenizer.tokenize('''This is a test''' ) self.assertListEqual(lowerCAmelCase, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase ), [289, 50, 14, 174, 386], ) lowerCamelCase_ =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowerCAmelCase, [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''], ) lowerCamelCase_ =tokenizer.convert_tokens_to_ids(lowerCAmelCase ) self.assertListEqual(lowerCAmelCase, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] ) lowerCamelCase_ =tokenizer.convert_ids_to_tokens(lowerCAmelCase ) self.assertListEqual( lowerCAmelCase, [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''], ) @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ={'''input_ids''': [[3_791, 797, 31, 11, 64, 797, 31, 2_429, 433, 12, 1_176, 12, 20, 786, 915, 
142, 2_413, 240, 37, 3_238, 797, 31, 11, 35, 93, 915, 142, 2_413, 240, 37, 5_540, 567, 1_276, 93, 37, 610, 40, 62, 455, 657, 1_042, 123, 780, 177, 37, 309, 241, 1_298, 514, 20, 292, 2_737, 114, 2_469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3_388, 511, 459, 4, 3_555, 40, 321, 302, 705, 4, 3_388, 511, 583, 326, 5, 5, 5, 62, 3_310, 560, 177, 2_680, 217, 1_508, 32, 31, 853, 418, 64, 583, 511, 1_605, 62, 35, 93, 560, 177, 2_680, 217, 1_508, 1_521, 64, 583, 511, 519, 62, 20, 1_515, 764, 20, 149, 261, 5_625, 7_972, 20, 5_540, 567, 1_276, 93, 3_925, 1_675, 11, 15, 802, 7_972, 576, 217, 1_508, 11, 35, 93, 1_253, 2_441, 15, 289, 652, 31, 416, 321, 3_842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2_681, 1_153, 3_434, 20, 5_540, 37, 567, 126, 1_253, 2_441, 3_376, 449, 210, 431, 1_563, 177, 767, 5_540, 11, 1_203, 472, 11, 2_953, 685, 285, 364, 706, 1_153, 20, 6_799, 20, 2_869, 20, 4_464, 126, 40, 2_429, 20, 1_040, 866, 2_664, 418, 20, 318, 20, 1_726, 186, 20, 265, 522, 35, 93, 2_191, 4_634, 20, 1_040, 12, 6_799, 15, 228, 2_356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_575, 2_666, 684, 1_582, 1_176, 12, 627, 149, 619, 20, 4_902, 563, 11, 20, 149, 261, 3_420, 2_356, 174, 142, 4_714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase, model_name='''facebook/s2t-small-mustc-en-de-st''', revision='''a14f04cf0776c02f62a8cb800cf7909e15ea23ad''', ) @require_sentencepiece class __UpperCamelCase ( 
unittest.TestCase ): lowercase : Tuple ='valhalla/s2t_mustc_multilinguial_medium' lowercase : Dict ='C\'est trop cool' lowercase : str ='Esto es genial' @classmethod def lowercase__ ( cls ): """simple docstring""" lowerCamelCase_ =SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name ) return cls def lowercase__ ( self ): """simple docstring""" self.assertEqual(self.tokenizer.lang_code_to_id['''pt'''], 4 ) self.assertEqual(self.tokenizer.lang_code_to_id['''ru'''], 6 ) self.assertEqual(self.tokenizer.lang_code_to_id['''it'''], 9 ) self.assertEqual(self.tokenizer.lang_code_to_id['''de'''], 11 ) def lowercase__ ( self ): """simple docstring""" self.assertEqual(self.tokenizer.vocab_size, 10_000 ) def lowercase__ ( self ): """simple docstring""" self.assertIn(lowerCAmelCase, self.tokenizer.all_special_ids ) lowerCamelCase_ =[ES_CODE, 4, 1_601, 47, 7_647, 2] lowerCamelCase_ =self.tokenizer.decode(lowerCAmelCase, skip_special_tokens=lowerCAmelCase ) lowerCamelCase_ =self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowerCAmelCase ) self.assertEqual(lowerCAmelCase, lowerCAmelCase ) self.assertNotIn(self.tokenizer.eos_token, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''fr''' lowerCamelCase_ =self.tokenizer(self.french_text ).input_ids self.assertEqual(encoded[0], lowerCAmelCase ) self.assertEqual(encoded[-1], self.tokenizer.eos_token_id ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''fr''' self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE] ) lowerCamelCase_ ='''es''' self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE] )
75
0
"""simple docstring""" def _snake_case ( lowercase__ ): if not nums: # Makes sure that the list is not empty raise ValueError('List is empty' ) _lowerCamelCase : int = sum(__snake_case ) / len(__snake_case ) # Calculate the average return sum(abs(x - average ) for x in nums ) / len(__snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
96
"""Convert InstructBLIP checkpoints from the original LAVIS repository to the
Hugging Face format.

NOTE(review): the obfuscated original defined every function as ``a_`` and
collapsed every local to ``lowerCamelCase_`` while referencing the intended
names, so nothing resolved at runtime. Names below are restored from the
in-file call sites; targets that were not visible are marked with TODOs.
"""
import argparse

import requests
import torch

# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    BlipImageProcessor,
    InstructBlipConfig,
    InstructBlipForConditionalGeneration,
    InstructBlipProcessor,
    InstructBlipQFormerConfig,
    InstructBlipVisionConfig,
    LlamaConfig,
    LlamaTokenizerFast,
    T5Config,
    T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD


def load_demo_image():
    """Download and return the demo image used for the sanity check."""
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image


def create_rename_keys(config):
    """Build the (old_name, new_name) pairs mapping LAVIS weights to HF names."""
    rename_keys = []
    # fmt: off
    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias"))
    # fmt: on
    return rename_keys


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place."""
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    """Recombine the separate q/v biases of each vision block into one qkv bias."""
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict: qkv = (q, zeros-for-k, v)
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        # NOTE(review): target key was lost in the obfuscated source; this is the
        # conventional HF destination for the concatenated bias — confirm.
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias


def get_blipa_config(model_name):
    """Build an :class:`InstructBlipConfig` for *model_name*; returns (config, image_size)."""
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32001).to_dict()
    else:
        raise ValueError("Model name not supported")

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)

    return config, image_size


@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert a LAVIS InstructBLIP checkpoint to HF format, verify logits, and
    optionally save/push the converted model."""
    qformer_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", truncation_side="left")
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})

    if "t5" in model_name:
        tokenizer = T5TokenizerFast.from_pretrained("google/flan-t5-xl", truncation_side="left")
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b", truncation_side="left", bos_token="</s>", unk_token="</s>"
        )
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})

    config, image_size = get_blipa_config(model_name)
    hf_model = InstructBlipForConditionalGeneration(config).eval()

    model_name_to_original = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }
    name, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    # keep the two models on different devices so both fit in memory
    lavis_device = "cuda:1" if torch.cuda.is_available() else "cpu"
    hf_model_device = "cuda:2" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=lavis_device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "llm_proj" in key:
            key = key.replace("llm_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("llm_model"):
            key = key.replace("llm_model", "language_model")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)

    image = load_demo_image()
    prompt = "What is unusual about this image?"

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = InstructBlipProcessor(
        image_processor=image_processor,
        tokenizer=tokenizer,
        qformer_tokenizer=qformer_tokenizer,
    )
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(hf_model_device)

    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)

    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}
            ).logits
            label_input_ids = tokenizer("\n", return_tensors="pt").input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits

    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if "vicuna" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print("Looks ok!")

    print("Generating with original model...")
    original_outputs = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5)

    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model...")
    outputs = hf_model.generate(
        **inputs,
        do_sample=False,
        num_beams=5,
        max_length=256,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.5,
        length_penalty=1.0,
        temperature=1,
    )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print("Original generation:", original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"Salesforce/{model_name}")
        hf_model.push_to_hub(f"Salesforce/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "instructblip-vicuna-7b",
        "instructblip-vicuna-13b",
        "instructblip-flan-t5-xl",
        "instructblip-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="instructblip-flan-t5-xl",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    args = parser.parse_args()

    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
75
0
"""Registry of the packaged dataset-builder modules, with source hashes used
for caching and extension→module inference tables."""
import inspect
import re
from hashlib import sha256
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from .text import text


def _hash_python_lines(lines: List[str]) -> str:
    """Hash python source *lines*, ignoring comments and blank lines.

    Fix: the original defined this as ``_a`` with an unused parameter while the
    module-level registry below called ``_hash_python_lines`` on undefined
    names; ``shaaaa`` is also not a real ``hashlib`` export (``sha256`` is).
    """
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    return sha256(full_str.encode("utf-8")).hexdigest()


# Backward-compatible alias for the obfuscated name.
_a = _hash_python_lines

# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})

# Modules whose loaders support reading extra metadata files
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
347
'''simple docstring''' from __future__ import annotations from math import pi from typing import Protocol import matplotlib.pyplot as plt import numpy as np class __UpperCamelCase ( lowerCamelCase__ ): def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" return 0.0 def a_ ( __snake_case : np.ndarray , __snake_case : int ) -> tuple[int | float, int | float]: """simple docstring""" lowerCamelCase_ =min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] ) lowerCamelCase_ =max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] ) return lowest, highest def a_ ( __snake_case : FilterType , __snake_case : int ) -> None: """simple docstring""" lowerCamelCase_ =512 lowerCamelCase_ =[1] + [0] * (size - 1) lowerCamelCase_ =[filter_type.process(__snake_case ) for item in inputs] lowerCamelCase_ =[0] * (samplerate - size) # zero-padding outputs += filler lowerCamelCase_ =np.abs(np.fft.fft(__snake_case ) ) lowerCamelCase_ =20 * np.logaa(__snake_case ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel('''Frequency (Hz)''' ) plt.xscale('''log''' ) # Display within reasonable bounds lowerCamelCase_ =get_bounds(__snake_case , __snake_case ) plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) ) plt.ylabel('''Gain (dB)''' ) plt.plot(__snake_case ) plt.show() def a_ ( __snake_case : FilterType , __snake_case : int ) -> None: """simple docstring""" lowerCamelCase_ =512 lowerCamelCase_ =[1] + [0] * (size - 1) lowerCamelCase_ =[filter_type.process(__snake_case ) for item in inputs] lowerCamelCase_ =[0] * (samplerate - size) # zero-padding outputs += filler lowerCamelCase_ =np.angle(np.fft.fft(__snake_case ) ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel('''Frequency (Hz)''' ) plt.xscale('''log''' ) plt.ylim(-2 * pi , 2 * pi ) plt.ylabel('''Phase shift (Radians)''' ) plt.plot(np.unwrap(__snake_case , -2 * pi ) ) plt.show()
75
0
"""simple docstring""" from __future__ import annotations from decimal import Decimal from math import * # noqa: F403 from sympy import diff def A_ ( _lowercase, _lowercase, _lowercase = 10**-10 ): '''simple docstring''' snake_case_ :List[str] = a while True: snake_case_ :Tuple = Decimal(__snake_case ) - ( Decimal(eval(__snake_case ) ) / Decimal(eval(str(diff(__snake_case ) ) ) ) # noqa: S307 ) # This number dictates the accuracy of the answer if abs(eval(__snake_case ) ) < precision: # noqa: S307 return float(__snake_case ) # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""") # Find root of polynomial print(F"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}""") # Find Square Root of 5 print(F"""The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}""") # Exponential Roots print(F"""The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}""")
66
"""Tests for the Funnel tokenizer (slow and fast implementations)."""
import os
import unittest

from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    # Fix: the base class was an undefined placeholder; `TokenizerTesterMixin`
    # is imported above and provides get_tokenizers()/tmpdirname used below.
    # Local assignment targets (vocab_tokens, tokens, inputs, ...) are restored
    # from their references.
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        # NOTE(review): the keyword value was obfuscated; `do_lower_case=False`
        # assumed — confirm against the tokenizer-common test conventions.
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
75
0
"""Tokenizer for the ERNIE-M model, backed by SentencePiece.

NOTE(review): the obfuscated original assigned every module constant to the
same placeholder name and every local to ``__lowercase``; names below are
restored from their in-file uses. ``self.SP_CHAR_MAPPING`` is referenced but
its definition is not visible in this chunk — confirm it exists upstream.
"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}


class ErnieMTokenizer(PreTrainedTokenizer):
    """Construct an ERNIE-M tokenizer from a SentencePiece model and an
    optional plain-text vocab file."""

    model_input_names: List[str] = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            vocab_file=vocab_file,
            encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}

    def get_offset_mapping(self, text):
        """Map each produced token back to its (start, end) character span in *text*."""
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        # normalize the text the same way tokenization does, remembering for
        # every normalized character which original index it came from
        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        state = self.__dict__.copy()
        # the SentencePiece processor is not picklable; reloaded in __setstate__
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    def clean_text(self, text):
        """Apply the SentencePiece character normalization map to *text*."""
        return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))

    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        """Tokenize *text*, then re-split pieces on CJK chars, punctuation and
        digit boundaries."""
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                # keep a lone underline only when the next piece does not
                # already start with one
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.'
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)

    def is_ch_char(self, char):
        """True for CJK unified ideographs."""
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False

    def load_vocab(self, filepath):
        """Load a one-token-per-line vocab file into a token→index dict."""
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)

        return token_to_idx

    def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)

        return (vocab_file,)
141
"""Convert Pix2Struct checkpoints from the original T5X format to Hugging Face.

NOTE(review): the obfuscated original imported non-existent names
(``from tax import checkpoints``, ``PixaStruct*``) and defined every function
as ``a_``; the real T5X / Pix2Struct names are restored below.
"""
import argparse
import os
import re

import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints

from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)


def get_flax_param(t5x_checkpoint_path):
    """Load a T5X checkpoint and return its parameters as a flat dict."""
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params


def rename_and_convert_flax_params(flax_dict):
    """Rename flax parameter keys to HF names and convert values to torch tensors."""
    converted_dict = {}

    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            # weight matrices are stored transposed in flax
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict


def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    """Convert, load, and save a Pix2Struct checkpoint together with its processor."""
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        # NOTE(review): the two assignment targets were lost in the obfuscated
        # source; these are the conventional large-model settings — confirm.
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
    args = parser.parse_args()

    # Fix: argparse exposes `--t5x_checkpoint_path` as `args.t5x_checkpoint_path`
    # (the original read a non-existent `args.tax_checkpoint_path`).
    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
75
0
import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all feature extractors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) __lowerCamelCase : List[Any] = logging.get_logger(__name__) __lowerCamelCase : Tuple = OrderedDict( [ ("""audio-spectrogram-transformer""", """ASTFeatureExtractor"""), ("""beit""", """BeitFeatureExtractor"""), ("""chinese_clip""", """ChineseCLIPFeatureExtractor"""), ("""clap""", """ClapFeatureExtractor"""), ("""clip""", """CLIPFeatureExtractor"""), ("""clipseg""", """ViTFeatureExtractor"""), ("""conditional_detr""", """ConditionalDetrFeatureExtractor"""), ("""convnext""", """ConvNextFeatureExtractor"""), ("""cvt""", """ConvNextFeatureExtractor"""), ("""data2vec-audio""", """Wav2Vec2FeatureExtractor"""), ("""data2vec-vision""", """BeitFeatureExtractor"""), ("""deformable_detr""", """DeformableDetrFeatureExtractor"""), ("""deit""", """DeiTFeatureExtractor"""), ("""detr""", """DetrFeatureExtractor"""), ("""dinat""", """ViTFeatureExtractor"""), ("""donut-swin""", """DonutFeatureExtractor"""), ("""dpt""", """DPTFeatureExtractor"""), ("""encodec""", """EncodecFeatureExtractor"""), ("""flava""", """FlavaFeatureExtractor"""), ("""glpn""", """GLPNFeatureExtractor"""), ("""groupvit""", """CLIPFeatureExtractor"""), ("""hubert""", """Wav2Vec2FeatureExtractor"""), ("""imagegpt""", """ImageGPTFeatureExtractor"""), ("""layoutlmv2""", """LayoutLMv2FeatureExtractor"""), ("""layoutlmv3""", """LayoutLMv3FeatureExtractor"""), ("""levit""", """LevitFeatureExtractor"""), 
("""maskformer""", """MaskFormerFeatureExtractor"""), ("""mctct""", """MCTCTFeatureExtractor"""), ("""mobilenet_v1""", """MobileNetV1FeatureExtractor"""), ("""mobilenet_v2""", """MobileNetV2FeatureExtractor"""), ("""mobilevit""", """MobileViTFeatureExtractor"""), ("""nat""", """ViTFeatureExtractor"""), ("""owlvit""", """OwlViTFeatureExtractor"""), ("""perceiver""", """PerceiverFeatureExtractor"""), ("""poolformer""", """PoolFormerFeatureExtractor"""), ("""regnet""", """ConvNextFeatureExtractor"""), ("""resnet""", """ConvNextFeatureExtractor"""), ("""segformer""", """SegformerFeatureExtractor"""), ("""sew""", """Wav2Vec2FeatureExtractor"""), ("""sew-d""", """Wav2Vec2FeatureExtractor"""), ("""speech_to_text""", """Speech2TextFeatureExtractor"""), ("""speecht5""", """SpeechT5FeatureExtractor"""), ("""swiftformer""", """ViTFeatureExtractor"""), ("""swin""", """ViTFeatureExtractor"""), ("""swinv2""", """ViTFeatureExtractor"""), ("""table-transformer""", """DetrFeatureExtractor"""), ("""timesformer""", """VideoMAEFeatureExtractor"""), ("""tvlt""", """TvltFeatureExtractor"""), ("""unispeech""", """Wav2Vec2FeatureExtractor"""), ("""unispeech-sat""", """Wav2Vec2FeatureExtractor"""), ("""van""", """ConvNextFeatureExtractor"""), ("""videomae""", """VideoMAEFeatureExtractor"""), ("""vilt""", """ViltFeatureExtractor"""), ("""vit""", """ViTFeatureExtractor"""), ("""vit_mae""", """ViTFeatureExtractor"""), ("""vit_msn""", """ViTFeatureExtractor"""), ("""wav2vec2""", """Wav2Vec2FeatureExtractor"""), ("""wav2vec2-conformer""", """Wav2Vec2FeatureExtractor"""), ("""wavlm""", """Wav2Vec2FeatureExtractor"""), ("""whisper""", """WhisperFeatureExtractor"""), ("""xclip""", """CLIPFeatureExtractor"""), ("""yolos""", """YolosFeatureExtractor"""), ] ) __lowerCamelCase : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) def A_ ( _lowerCAmelCase ) -> Any: for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items(): if class_name in extractors: 
UpperCamelCase : Optional[Any] = model_type_to_module_name(__snake_case ) UpperCamelCase : Optional[int] = importlib.import_module(F""".{module_name}""" , "transformers.models" ) try: return getattr(__snake_case , __snake_case ) except AttributeError: continue for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): if getattr(__snake_case , "__name__" , __snake_case ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. UpperCamelCase : Tuple = importlib.import_module("transformers" ) if hasattr(__snake_case , __snake_case ): return getattr(__snake_case , __snake_case ) return None def A_ ( _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = False , **_lowerCAmelCase , ) -> List[str]: UpperCamelCase : Dict = get_file_from_repo( __snake_case , __snake_case , cache_dir=__snake_case , force_download=__snake_case , resume_download=__snake_case , proxies=__snake_case , use_auth_token=__snake_case , revision=__snake_case , local_files_only=__snake_case , ) if resolved_config_file is None: logger.info( "Could not locate the feature extractor configuration file, will try to use the model config instead." ) return {} with open(__snake_case , encoding="utf-8" ) as reader: return json.load(__snake_case ) class A__ : def __init__( self ): '''simple docstring''' raise EnvironmentError( "AutoFeatureExtractor is designed to be instantiated " "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method." 
) @classmethod @replace_list_option_in_docstrings(A_ ) def __UpperCamelCase( cls , A_ , **A_ ): '''simple docstring''' UpperCamelCase : Tuple = kwargs.pop("config" , A_ ) UpperCamelCase : str = kwargs.pop("trust_remote_code" , A_ ) UpperCamelCase : str = True UpperCamelCase , UpperCamelCase : Optional[Any] = FeatureExtractionMixin.get_feature_extractor_dict(A_ , **A_ ) UpperCamelCase : List[Any] = config_dict.get("feature_extractor_type" , A_ ) UpperCamelCase : Any = None if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ): UpperCamelCase : List[Any] = config_dict["auto_map"]["AutoFeatureExtractor"] # If we don't find the feature extractor class in the feature extractor config, let's try the model config. if feature_extractor_class is None and feature_extractor_auto_map is None: if not isinstance(A_ , A_ ): UpperCamelCase : Any = AutoConfig.from_pretrained(A_ , **A_ ) # It could be in `config.feature_extractor_type`` UpperCamelCase : Tuple = getattr(A_ , "feature_extractor_type" , A_ ) if hasattr(A_ , "auto_map" ) and "AutoFeatureExtractor" in config.auto_map: UpperCamelCase : List[str] = config.auto_map["AutoFeatureExtractor"] if feature_extractor_class is not None: UpperCamelCase : int = feature_extractor_class_from_name(A_ ) UpperCamelCase : Any = feature_extractor_auto_map is not None UpperCamelCase : Optional[Any] = feature_extractor_class is not None or type(A_ ) in FEATURE_EXTRACTOR_MAPPING UpperCamelCase : Dict = resolve_trust_remote_code( A_ , A_ , A_ , A_ ) if has_remote_code and trust_remote_code: UpperCamelCase : str = get_class_from_dynamic_module( A_ , A_ , **A_ ) UpperCamelCase : Tuple = kwargs.pop("code_revision" , A_ ) if os.path.isdir(A_ ): feature_extractor_class.register_for_auto_class() return feature_extractor_class.from_dict(A_ , **A_ ) elif feature_extractor_class is not None: return feature_extractor_class.from_dict(A_ , **A_ ) # Last try: we use the FEATURE_EXTRACTOR_MAPPING. 
elif type(A_ ) in FEATURE_EXTRACTOR_MAPPING: UpperCamelCase : Union[str, Any] = FEATURE_EXTRACTOR_MAPPING[type(A_ )] return feature_extractor_class.from_dict(A_ , **A_ ) raise ValueError( F"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """ F"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """ F"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" ) @staticmethod def __UpperCamelCase( A_ , A_ ): '''simple docstring''' FEATURE_EXTRACTOR_MAPPING.register(A_ , A_ )
52
'''simple docstring'''
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


a_ = logging.get_logger(__name__)


if is_vision_available():
    import PIL


class __UpperCamelCase(BaseImageProcessor):
    """CLIP-style image processor.

    Pipeline: optional RGB conversion -> resize (shortest edge) -> center crop
    -> rescale -> normalize -> channel-format conversion.
    """

    # NOTE(review): restored to the BaseImageProcessor convention; the obfuscated
    # source bound this list to a mangled attribute name.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BICUBIC,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_convert_rgb=True,
        **kwargs,
    ):
        """Store the default preprocessing configuration."""
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image,
        size,
        resample=PILImageResampling.BICUBIC,
        data_format=None,
        **kwargs,
    ):
        """Resize `image` so its shortest edge matches `size["shortest_edge"]`."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''')
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image,
        size,
        data_format=None,
        **kwargs,
    ):
        """Center-crop `image` to `size["height"]` x `size["width"]`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''')
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image,
        scale,
        data_format=None,
        **kwargs,
    ):
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image,
        mean,
        std,
        data_format=None,
        **kwargs,
    ):
        """Normalize `image` with per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        do_convert_rgb=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess one image or a batch of images into a `BatchFeature`.

        Each argument defaults to the value stored at construction time.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.'''
            )

        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''')

        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''')

        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''')

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''')

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
75
0
import warnings from ...utils import logging from .image_processing_dpt import DPTImageProcessor A__ : Any = logging.get_logger(__name__) class _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" def __init__( self : Optional[int], *lowerCamelCase : Optional[Any], **lowerCamelCase : Optional[int] ): '''simple docstring''' warnings.warn( '''The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use DPTImageProcessor instead.''', lowerCamelCase, ) super().__init__(*lowerCamelCase, **lowerCamelCase )
207
'''simple docstring'''
from __future__ import annotations


def a_(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """Crack a Caesar cipher with a chi-squared frequency test.

    Decrypts `ciphertext` with every possible shift over the alphabet and
    scores each candidate against expected letter frequencies; the shift with
    the smallest chi-squared statistic wins.

    Args:
        ciphertext: the encrypted message.
        cipher_alphabet: alphabet used by the cipher (defaults to 'a'..'z').
        frequencies_dict: expected letter frequencies (defaults to English).
        case_sensitive: if False, the ciphertext is lower-cased before analysis.

    Returns:
        (most_likely_shift, its_chi_squared_value, decoded_message)
    """
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values: shift -> (statistic, decrypted text)
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(alphabet_letters)
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the excepcted amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the excepcted amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
75
0
import argparse
import logging
import os
from datetime import datetime

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm

# NOTE(review): restored from the mangled `GPTaLMHeadModel` so the import resolves.
from transformers import GPT2LMHeadModel


# Bound as `logger` because every function below logs through `logger`.
logger = logging.getLogger(__name__)


def save_model(model, dirpath):
    """Save `model` to `dirpath`, replacing any existing config/weights files."""
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)


def entropy(p, unlogit=False):
    """Compute Shannon entropy -sum(p*log(p)) over the last dimension of `p`."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    # 0 * log(0) is defined as 0 — zero out the NaNs produced by log(0).
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)


def print_ad_tensor(tensor):
    """Log a 2D tensor row by row (floats with 5 decimals, ints verbatim)."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))


def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Run the model on the eval data and accumulate per-head entropy and
    importance scores (gradient magnitude of the head mask).

    Returns (attn_entropy, head_importance, total_loss).
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_ad_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_ad_tensor(head_importance)
        logger.info("Head ranked by importance scores")
        head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
        # NOTE(review): index-assignment target reconstructed per the upstream
        # bertology example; the obfuscated source lost the left-hand side.
        head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
            head_importance.numel(), device=args.device
        )
        head_ranks = head_ranks.view_as(head_importance)
        print_ad_tensor(head_ranks)

    return attn_entropy, head_importance, total_loss


def mask_heads(args, model, eval_dataloader):
    """Iteratively mask the least-important heads until the LM score drops
    below `args.masking_threshold` of the original score.

    Returns the final head mask tensor (also saved to `head_mask.npy`).
    """
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask


def prune_heads(args, model, eval_dataloader, head_mask):
    """Actually prune the heads zeroed in `head_mask` and compare score and
    timing before/after pruning. Saves the pruned model to `args.output_dir`.
    """
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            # .tolist() returns a bare int when a single head is pruned in a layer.
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)


def main():
    """Parse CLI arguments, load GPT-2, and run head importance / masking / pruning."""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    # NOTE(review): dtype restored from the mangled `np.intaa` (not a NumPy name).
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
"""Utilities to dynamically load community pipeline modules from the Hub or GitHub."""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request

from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version

from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging


COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def get_diffusers_versions():
    """Return all `diffusers` versions published on PyPI, sorted ascending."""
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda release: version.Version(release))


def init_hf_modules():
    """Create the HF dynamic-modules cache as an importable package and put it on `sys.path`.

    Idempotent: returns immediately if the cache directory is already on the path.
    """
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name: Union[str, os.PathLike]):
    """Create a sub-package named `name` inside the dynamic-modules cache (recursively)."""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def get_relative_imports(module_file):
    """Return the (unique) module names that `module_file` imports relatively.

    Catches both `import .xxx` and `from .xxx import yyy` forms.
    """
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))


def get_relative_import_files(module_file):
    """Return every file transitively reachable from `module_file` through relative imports."""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports


def check_imports(filename):
    """Check that every top-level package imported by `filename` is installed.

    Raises:
        ImportError: listing the missing packages, if any.

    Returns:
        The list of relative imports of `filename` (see `get_relative_imports`).
    """
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)


def get_class_in_module(class_name, module_path):
    """Import `module_path` and return the class named `class_name`.

    When `class_name` is None, fall back to auto-detecting the single pipeline class.
    """
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    """Return the unique `DiffusionPipeline` subclass defined in `loaded_module`.

    Raises:
        ValueError: if more than one candidate class is found.
    """
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class


def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    """Download (or locate) `module_file` and cache it inside the dynamic-modules cache.

    Resolution order: local file path, then a single-name community pipeline on the
    diffusers GitHub repo, then a file inside a Hub model repo.

    Returns:
        The path of the cached module, relative to the dynamic-modules cache root.
    """
    # Download and cache module_file from the repo `pretrained_model_name_or_path` of grab it if it's a local file.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)


def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Cache `module_file` from `pretrained_model_name_or_path` and return the class in it.

    Thin convenience wrapper over `get_cached_module_file` + `get_class_in_module`.
    """
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
75
0
"""Check that `# Copied from diffusers.xxx` code blocks match their originals."""
import argparse
import glob
import importlib.util
import os
import re

import black
from doc_builder.style_doc import style_docstrings_in_code

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()


def _should_continue(line, indent):
    """Whether `line` still belongs to the body of an object whose body is indented by `indent`."""
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name` (a dotted path inside diffusers)."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines)
            and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    return "".join(lines[start_index:line_index])


_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")


def get_indent(code):
    """Return the leading whitespace of the first non-empty line of `code`."""
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Format `code` with black (wrapping indented snippets in a dummy class first)."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result


def is_copy_consistent(filename, overwrite=False):
    """Check every `# Copied from` block in `filename` against its original.

    Returns a list of `[object_name, start_line]` diffs; rewrites the file in
    place when `overwrite` is True.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code = "".join(lines[start_index:line_index])

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obja, objb, option = pattern.groups()
                theoretical_code = re.sub(obja, objb, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obja.lower(), objb.lower(), theoretical_code)
                    theoretical_code = re.sub(obja.upper(), objb.upper(), theoretical_code)

        # Blackify after replacement. To be able to do that, we need the header (class or function definition)
        # from the previous line
        theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
        theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs


def check_copies(overwrite: bool = False):
    """Run `is_copy_consistent` over every Python file in the diffusers source tree.

    Raises:
        Exception: listing all inconsistencies when `overwrite` is False and any were found.
    """
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
340
'''simple docstring''' a_ : Any = [ 9_99, 8_00, 7_99, 6_00, 5_99, 5_00, 4_00, 3_99, 3_77, 3_55, 3_33, 3_11, 2_88, 2_66, 2_44, 2_22, 2_00, 1_99, 1_77, 1_55, 1_33, 1_11, 88, 66, 44, 22, 0, ] a_ : Any = [ 9_99, 9_76, 9_52, 9_28, 9_05, 8_82, 8_58, 8_57, 8_10, 7_62, 7_15, 7_14, 5_72, 4_29, 4_28, 2_86, 2_85, 2_38, 1_90, 1_43, 1_42, 1_18, 95, 71, 47, 24, 0, ] a_ : Optional[Any] = [ 9_99, 9_88, 9_77, 9_66, 9_55, 9_44, 9_33, 9_22, 9_11, 9_00, 8_99, 8_79, 8_59, 8_40, 8_20, 8_00, 7_99, 7_66, 7_33, 7_00, 6_99, 6_50, 6_00, 5_99, 5_00, 4_99, 4_00, 3_99, 3_50, 3_00, 2_99, 2_66, 2_33, 2_00, 1_99, 1_79, 1_59, 1_40, 1_20, 1_00, 99, 88, 77, 66, 55, 44, 33, 22, 11, 0, ] a_ : str = [ 9_99, 9_95, 9_92, 9_89, 9_85, 9_81, 9_78, 9_75, 9_71, 9_67, 9_64, 9_61, 9_57, 9_56, 9_51, 9_47, 9_42, 9_37, 9_33, 9_28, 9_23, 9_19, 9_14, 9_13, 9_08, 9_03, 8_97, 8_92, 8_87, 8_81, 8_76, 8_71, 8_70, 8_64, 8_58, 8_52, 8_46, 8_40, 8_34, 8_28, 8_27, 8_20, 8_13, 8_06, 7_99, 7_92, 7_85, 7_84, 7_77, 7_70, 7_63, 7_56, 7_49, 7_42, 7_41, 7_33, 7_24, 7_16, 7_07, 6_99, 6_98, 6_88, 6_77, 6_66, 6_56, 6_55, 6_45, 6_34, 6_23, 6_13, 6_12, 5_98, 5_84, 5_70, 5_69, 5_55, 5_41, 5_27, 5_26, 5_05, 4_84, 4_83, 4_62, 4_40, 4_39, 3_96, 3_95, 3_52, 3_51, 3_08, 3_07, 2_64, 2_63, 2_20, 2_19, 1_76, 1_32, 88, 44, 0, ] a_ : Optional[int] = [ 9_99, 9_97, 9_95, 9_92, 9_90, 9_88, 9_86, 9_84, 9_81, 9_79, 9_77, 9_75, 9_72, 9_70, 9_68, 9_66, 9_64, 9_61, 9_59, 9_57, 9_56, 9_54, 9_51, 9_49, 9_46, 9_44, 9_41, 9_39, 9_36, 9_34, 9_31, 9_29, 9_26, 9_24, 9_21, 9_19, 9_16, 9_14, 9_13, 9_10, 9_07, 9_05, 9_02, 8_99, 8_96, 8_93, 8_91, 8_88, 8_85, 8_82, 8_79, 8_77, 8_74, 8_71, 8_70, 8_67, 8_64, 8_61, 8_58, 8_55, 8_52, 8_49, 8_46, 8_43, 8_40, 8_37, 8_34, 8_31, 8_28, 8_27, 8_24, 8_21, 8_17, 8_14, 8_11, 8_08, 8_04, 8_01, 7_98, 7_95, 7_91, 7_88, 7_85, 7_84, 7_80, 7_77, 7_74, 7_70, 7_66, 7_63, 7_60, 7_56, 7_52, 7_49, 7_46, 7_42, 7_41, 7_37, 7_33, 7_30, 7_26, 7_22, 7_18, 7_14, 7_10, 7_07, 7_03, 6_99, 6_98, 6_94, 6_90, 6_85, 6_81, 6_77, 6_73, 6_69, 6_64, 6_60, 
6_56, 6_55, 6_50, 6_46, 6_41, 6_36, 6_32, 6_27, 6_22, 6_18, 6_13, 6_12, 6_07, 6_02, 5_96, 5_91, 5_86, 5_80, 5_75, 5_70, 5_69, 5_63, 5_57, 5_51, 5_45, 5_39, 5_33, 5_27, 5_26, 5_19, 5_12, 5_05, 4_98, 4_91, 4_84, 4_83, 4_74, 4_66, 4_57, 4_49, 4_40, 4_39, 4_28, 4_18, 4_07, 3_96, 3_95, 3_81, 3_66, 3_52, 3_51, 3_30, 3_08, 3_07, 2_86, 2_64, 2_63, 2_42, 2_20, 2_19, 1_76, 1_75, 1_32, 1_31, 88, 44, 0, ] a_ : Dict = [ 9_99, 9_91, 9_82, 9_74, 9_66, 9_58, 9_50, 9_41, 9_33, 9_25, 9_16, 9_08, 9_00, 8_99, 8_74, 8_50, 8_25, 8_00, 7_99, 7_00, 6_00, 5_00, 4_00, 3_00, 2_00, 1_00, 0, ] a_ : Tuple = [ 9_99, 9_92, 9_85, 9_78, 9_71, 9_64, 9_57, 9_49, 9_42, 9_35, 9_28, 9_21, 9_14, 9_07, 9_00, 8_99, 8_79, 8_59, 8_40, 8_20, 8_00, 7_99, 7_66, 7_33, 7_00, 6_99, 6_50, 6_00, 5_99, 5_00, 4_99, 4_00, 3_99, 3_00, 2_99, 2_00, 1_99, 1_00, 99, 0, ] a_ : Any = [ 9_99, 9_96, 9_92, 9_89, 9_85, 9_82, 9_79, 9_75, 9_72, 9_68, 9_65, 9_61, 9_58, 9_55, 9_51, 9_48, 9_44, 9_41, 9_38, 9_34, 9_31, 9_27, 9_24, 9_20, 9_17, 9_14, 9_10, 9_07, 9_03, 9_00, 8_99, 8_91, 8_84, 8_76, 8_69, 8_61, 8_53, 8_46, 8_38, 8_30, 8_23, 8_15, 8_08, 8_00, 7_99, 7_88, 7_77, 7_66, 7_55, 7_44, 7_33, 7_22, 7_11, 7_00, 6_99, 6_88, 6_77, 6_66, 6_55, 6_44, 6_33, 6_22, 6_11, 6_00, 5_99, 5_85, 5_71, 5_57, 5_42, 5_28, 5_14, 5_00, 4_99, 4_85, 4_71, 4_57, 4_42, 4_28, 4_14, 4_00, 3_99, 3_79, 3_59, 3_40, 3_20, 3_00, 2_99, 2_79, 2_59, 2_40, 2_20, 2_00, 1_99, 1_66, 1_33, 1_00, 99, 66, 33, 0, ]
75
0
'''simple docstring''' from datetime import datetime import requests from bsa import BeautifulSoup if __name__ == "__main__": A__ : List[str] =input('''Enter image url: ''').strip() print(F"""Downloading image from {url} ...""") A__ : Tuple =BeautifulSoup(requests.get(url).content, '''html.parser''') # The image URL is in the content field of the first meta tag with property og:image A__ : Optional[Any] =soup.find('''meta''', {'''property''': '''og:image'''})["""content"""] A__ : Dict =requests.get(image_url).content A__ : int =F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg""" with open(file_name, '''wb''') as fp: fp.write(image_data) print(F"""Done. Image saved to disk as {file_name}.""")
70
"""Lazy import structure for the Funnel Transformer model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Modules that are always importable, regardless of installed backends.
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}

# Each optional backend extends `_import_structure` only when available;
# the obfuscated original assigned these lists to throwaway names, so the
# lazy module never exposed them.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
75
0
"""simple docstring""" from __future__ import annotations lowerCAmelCase : Any = [ [-1, 0], # left [0, -1], # down [1, 0], # right [0, 1], # up ] def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> tuple[list[list[int]], list[list[int]]]: lowerCamelCase = [ [0 for col in range(len(grid[0] ) )] for row in range(len(__snake_case ) ) ] # the reference grid lowerCamelCase = 1 lowerCamelCase = [ [0 for col in range(len(grid[0] ) )] for row in range(len(__snake_case ) ) ] # the action grid lowerCamelCase = init[0] lowerCamelCase = init[1] lowerCamelCase = 0 lowerCamelCase = g + heuristic[x][y] # cost from starting cell to destination cell lowerCamelCase = [[f, g, x, y]] lowerCamelCase = False # flag that is set when search is complete lowerCamelCase = False # flag set if we can't find expand while not found and not resign: if len(__snake_case ) == 0: raise ValueError("""Algorithm is unable to find solution""" ) else: # to choose the least costliest action so as to move closer to the goal cell.sort() cell.reverse() lowerCamelCase = cell.pop() lowerCamelCase = next_cell[2] lowerCamelCase = next_cell[3] lowerCamelCase = next_cell[1] if x == goal[0] and y == goal[1]: lowerCamelCase = True else: for i in range(len(__snake_case ) ): # to try out different valid actions lowerCamelCase = x + DIRECTIONS[i][0] lowerCamelCase = y + DIRECTIONS[i][1] if xa >= 0 and xa < len(__snake_case ) and ya >= 0 and ya < len(grid[0] ): if closed[xa][ya] == 0 and grid[xa][ya] == 0: lowerCamelCase = g + cost lowerCamelCase = ga + heuristic[xa][ya] cell.append([fa, ga, xa, ya] ) lowerCamelCase = 1 lowerCamelCase = i lowerCamelCase = [] lowerCamelCase = goal[0] lowerCamelCase = goal[1] invpath.append([x, y] ) # we get the reverse path from here while x != init[0] or y != init[1]: lowerCamelCase = x - DIRECTIONS[action[x][y]][0] lowerCamelCase = y - DIRECTIONS[action[x][y]][1] lowerCamelCase = xa lowerCamelCase = ya invpath.append([x, y] ) lowerCamelCase = [] for 
i in range(len(__snake_case ) ): path.append(invpath[len(__snake_case ) - 1 - i] ) return path, action if __name__ == "__main__": lowerCAmelCase : Dict = [ [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 1, 0], ] lowerCAmelCase : Tuple = [0, 0] # all coordinates are given in format [y,x] lowerCAmelCase : Optional[int] = [len(grid) - 1, len(grid[0]) - 1] lowerCAmelCase : List[Any] = 1 # the cost map which pushes the path closer to the goal lowerCAmelCase : List[Any] = [[0 for row in range(len(grid[0]))] for col in range(len(grid))] for i in range(len(grid)): for j in range(len(grid[0])): lowerCAmelCase : int = abs(i - goal[0]) + abs(j - goal[1]) if grid[i][j] == 1: # added extra penalty in the heuristic map lowerCAmelCase : int = 99 lowerCAmelCase : str = search(grid, init, goal, cost, heuristic) print("""ACTION MAP""") for i in range(len(action)): print(action[i]) for i in range(len(path)): print(path[i])
291
'''Tests for the Wav2Vec2 processor with n-gram language-model decoding (pyctcdecode).

NOTE(review): this chunk has been mechanically obfuscated ("WavaVeca" stands for
"Wav2Vec2"; most local variables were replaced by the placeholders
``lowerCamelCase_`` / ``lowerCAmelCase`` and every method was renamed
``lowercase__``).  As a consequence many reads (e.g. ``processor``,
``tokenizer``, ``kwargs``, ``self.tmpdirname``) no longer match any visible
assignment, two signatures repeat the same parameter name, and the duplicated
method names shadow one another -- the code documents intent but is NOT
runnable as written.  Comments below describe what each method *appears* to
test, grounded only in the visible calls and assertions.
'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path

import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized

from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available

from ..wavaveca.test_feature_extraction_wavaveca import floats_list


# pyctcdecode / torch are optional dependencies: only import the LM processor
# machinery when they are actually installed.
if is_pyctcdecode_available():
    from huggingface_hub import snapshot_download
    from pyctcdecode import BeamSearchDecoderCTC

    from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
    from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput

if is_torch_available():
    from transformers import WavaVecaForCTC


@require_pyctcdecode
class __UpperCamelCase ( unittest.TestCase ):
    # NOTE(review): every method below was renamed ``lowercase__`` by the
    # obfuscation pass, so later definitions shadow earlier ones; the original
    # file presumably had distinct names (setUp, get_tokenizer, tearDown, ...).

    def lowercase__ ( self ):
        """Build a tiny vocab/tokenizer config and feature-extractor config in a
        temp dir and record the hub id of the beam-search decoder (setUp-style)."""
        # 16-symbol toy CTC vocabulary; '|' is the word delimiter by Wav2Vec2 convention.
        lowerCamelCase_ ='''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
        lowerCamelCase_ =dict(zip(lowerCAmelCase, range(len(lowerCAmelCase ) ) ) )
        lowerCamelCase_ ={
            '''unk_token''': '''<unk>''',
            '''bos_token''': '''<s>''',
            '''eos_token''': '''</s>''',
        }
        lowerCamelCase_ ={
            '''feature_size''': 1,
            '''padding_value''': 0.0,
            '''sampling_rate''': 16_000,
            '''return_attention_mask''': False,
            '''do_normalize''': True,
        }
        lowerCamelCase_ =tempfile.mkdtemp()
        lowerCamelCase_ =os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
        lowerCamelCase_ =os.path.join(self.tmpdirname, lowerCAmelCase )
        with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(lowerCAmelCase ) + '''\n''' )
        with open(self.feature_extraction_file, '''w''', encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(lowerCAmelCase ) + '''\n''' )
        # load decoder from hub
        lowerCamelCase_ ='''hf-internal-testing/ngram-beam-search-decoder'''

    def lowercase__ ( self, **lowerCAmelCase ):
        """Return a CTC tokenizer loaded from the temp dir, with kwargs merged
        over the defaults saved in setup."""
        lowerCamelCase_ =self.add_kwargs_tokens_map.copy()
        kwargs.update(lowerCAmelCase )
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase )

    def lowercase__ ( self, **lowerCAmelCase ):
        """Return a feature extractor loaded from the temp dir."""
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **lowerCAmelCase )

    def lowercase__ ( self, **lowerCAmelCase ):
        """Return the pyctcdecode beam-search decoder downloaded from the hub."""
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **lowerCAmelCase )

    def lowercase__ ( self ):
        """Remove the temporary directory (tearDown-style)."""
        shutil.rmtree(self.tmpdirname )

    def lowercase__ ( self ):
        """Round-trip the processor through save_pretrained/from_pretrained and
        check tokenizer vocab, feature-extractor config and decoder state survive."""
        lowerCamelCase_ =self.get_tokenizer()
        lowerCamelCase_ =self.get_feature_extractor()
        lowerCamelCase_ =self.get_decoder()
        lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
        processor.save_pretrained(self.tmpdirname )
        lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer, lowerCAmelCase )
        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor, lowerCAmelCase )
        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels )
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set,
            decoder.model_container[decoder._model_key]._unigram_set, )
        self.assertIsInstance(processor.decoder, lowerCAmelCase )

    def lowercase__ ( self ):
        """Check that LM hyper-parameters passed to from_pretrained override the
        saved language-model settings (alpha/beta/score_boundary/unk_score_offset)."""
        lowerCamelCase_ =WavaVecaProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder() )
        processor.save_pretrained(self.tmpdirname )
        # make sure that error is thrown when decoder alphabet doesn't match
        lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3 )
        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0 )
        self.assertEqual(processor.language_model.beta, 3.0 )
        self.assertEqual(processor.language_model.score_boundary, -7.0 )
        self.assertEqual(processor.language_model.unk_score_offset, 3 )

    def lowercase__ ( self ):
        """Constructing the processor with a tokenizer whose vocab contains tokens
        unknown to the decoder must raise (error message contains 'include')."""
        lowerCamelCase_ =self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(['''xx'''] )
        with self.assertRaisesRegex(lowerCAmelCase, '''include''' ):
            WavaVecaProcessorWithLM(
                tokenizer=lowerCAmelCase, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder() )

    def lowercase__ ( self ):
        """Audio path: processor(...) must produce the same features as calling
        the feature extractor directly."""
        lowerCamelCase_ =self.get_feature_extractor()
        lowerCamelCase_ =self.get_tokenizer()
        lowerCamelCase_ =self.get_decoder()
        lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
        lowerCamelCase_ =floats_list((3, 1_000) )
        lowerCamelCase_ =feature_extractor(lowerCAmelCase, return_tensors='''np''' )
        lowerCamelCase_ =processor(lowerCAmelCase, return_tensors='''np''' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 )

    def lowercase__ ( self ):
        """Text path: processor(text=...) must match the tokenizer output."""
        lowerCamelCase_ =self.get_feature_extractor()
        lowerCamelCase_ =self.get_tokenizer()
        lowerCamelCase_ =self.get_decoder()
        lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
        lowerCamelCase_ ='''This is a test string'''
        lowerCamelCase_ =processor(text=lowerCAmelCase )
        lowerCamelCase_ =tokenizer(lowerCAmelCase )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key] )

    # NOTE(review): obfuscation duplicated the parameter name here (originally
    # ``shape=(2, 10, 16), seed=77`` judging by call sites) -- invalid as written.
    def lowercase__ ( self, lowerCAmelCase=(2, 10, 16), lowerCAmelCase=77 ):
        """Return deterministic dummy logits of the given shape (helper,
        originally ``_get_dummy_logits``)."""
        np.random.seed(lowerCAmelCase )
        return np.random.rand(*lowerCAmelCase )

    def lowercase__ ( self ):
        """processor.decode must agree with decoder.decode_beams on text, logit
        score and LM score for a single dummy-logits sequence."""
        lowerCamelCase_ =self.get_feature_extractor()
        lowerCamelCase_ =self.get_tokenizer()
        lowerCamelCase_ =self.get_decoder()
        lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
        lowerCamelCase_ =self._get_dummy_logits(shape=(10, 16), seed=13 )
        lowerCamelCase_ =processor.decode(lowerCAmelCase )
        lowerCamelCase_ =decoder.decode_beams(lowerCAmelCase )[0]
        self.assertEqual(decoded_decoder[0], decoded_processor.text )
        self.assertEqual('''</s> <s> </s>''', decoded_processor.text )
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score )
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score )

    @parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
    def lowercase__ ( self, lowerCAmelCase ):
        """batch_decode must agree with decoder.decode_beams_batch, with and
        without a user-supplied multiprocessing pool (None / fork / spawn)."""
        lowerCamelCase_ =self.get_feature_extractor()
        lowerCamelCase_ =self.get_tokenizer()
        lowerCamelCase_ =self.get_decoder()
        lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
        lowerCamelCase_ =self._get_dummy_logits()
        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            lowerCamelCase_ =processor.batch_decode(lowerCAmelCase )
        else:
            with get_context(lowerCAmelCase ).Pool() as pool:
                lowerCamelCase_ =processor.batch_decode(lowerCAmelCase, lowerCAmelCase )
        lowerCamelCase_ =list(lowerCAmelCase )
        with get_context('''fork''' ).Pool() as p:
            lowerCamelCase_ =decoder.decode_beams_batch(lowerCAmelCase, lowerCAmelCase )
        lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =[], [], []
        for beams in decoded_beams:
            # best beam per sequence: (text, ..., logit_score, lm_score)
            texts_decoder.append(beams[0][0] )
            logit_scores_decoder.append(beams[0][-2] )
            lm_scores_decoder.append(beams[0][-1] )
        self.assertListEqual(lowerCAmelCase, decoded_processor.text )
        self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''], decoded_processor.text )
        self.assertListEqual(lowerCAmelCase, decoded_processor.logit_score )
        self.assertListEqual(lowerCAmelCase, decoded_processor.lm_score )

    def lowercase__ ( self ):
        """batch_decode must honor beam_width / beam_prune_logp / token_min_logp
        and still match the raw pyctcdecode output (texts and scores)."""
        lowerCamelCase_ =self.get_feature_extractor()
        lowerCamelCase_ =self.get_tokenizer()
        lowerCamelCase_ =self.get_decoder()
        lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
        lowerCamelCase_ =self._get_dummy_logits()
        lowerCamelCase_ =15
        lowerCamelCase_ =-2_0.0
        lowerCamelCase_ =-4.0
        lowerCamelCase_ =processor.batch_decode(
            lowerCAmelCase, beam_width=lowerCAmelCase, beam_prune_logp=lowerCAmelCase, token_min_logp=lowerCAmelCase, )
        lowerCamelCase_ =decoded_processor_out.text
        lowerCamelCase_ =list(lowerCAmelCase )
        with get_context('''fork''' ).Pool() as pool:
            lowerCamelCase_ =decoder.decode_beams_batch(
                lowerCAmelCase, lowerCAmelCase, beam_width=lowerCAmelCase, beam_prune_logp=lowerCAmelCase, token_min_logp=lowerCAmelCase, )
        lowerCamelCase_ =[d[0][0] for d in decoded_decoder_out]
        lowerCamelCase_ =[d[0][2] for d in decoded_decoder_out]
        lowerCamelCase_ =[d[0][3] for d in decoded_decoder_out]
        self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
        self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''], lowerCAmelCase )
        self.assertTrue(np.array_equal(lowerCAmelCase, decoded_processor_out.logit_score ) )
        self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7], lowerCAmelCase, atol=1e-3 ) )
        self.assertTrue(np.array_equal(lowerCAmelCase, decoded_processor_out.lm_score ) )
        self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4], lowerCAmelCase, atol=1e-3 ) )

    def lowercase__ ( self ):
        """LM hyper-parameters passed to batch_decode must be applied to the
        decoder's model container and reproduce decoder.reset_params output."""
        lowerCamelCase_ =self.get_feature_extractor()
        lowerCamelCase_ =self.get_tokenizer()
        lowerCamelCase_ =self.get_decoder()
        lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
        lowerCamelCase_ =self._get_dummy_logits()
        lowerCamelCase_ =2.0
        lowerCamelCase_ =5.0
        lowerCamelCase_ =-2_0.0
        lowerCamelCase_ =True
        lowerCamelCase_ =processor.batch_decode(
            lowerCAmelCase, alpha=lowerCAmelCase, beta=lowerCAmelCase, unk_score_offset=lowerCAmelCase, lm_score_boundary=lowerCAmelCase, )
        lowerCamelCase_ =decoded_processor_out.text
        lowerCamelCase_ =list(lowerCAmelCase )
        decoder.reset_params(
            alpha=lowerCAmelCase, beta=lowerCAmelCase, unk_score_offset=lowerCAmelCase, lm_score_boundary=lowerCAmelCase, )
        with get_context('''fork''' ).Pool() as pool:
            lowerCamelCase_ =decoder.decode_beams_batch(
                lowerCAmelCase, lowerCAmelCase, )
        lowerCamelCase_ =[d[0][0] for d in decoded_decoder_out]
        self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
        self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''], lowerCAmelCase )
        lowerCamelCase_ =processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha, 2.0 )
        self.assertEqual(lm_model.beta, 5.0 )
        self.assertEqual(lm_model.unk_score_offset, -2_0.0 )
        self.assertEqual(lm_model.score_boundary, lowerCAmelCase )

    def lowercase__ ( self ):
        """from_pretrained must download only the decoder-relevant files
        (alphabet.json + language_model) from the hub repo."""
        lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        lowerCamelCase_ =processor.decoder.model_container[processor.decoder._model_key]
        lowerCamelCase_ =Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
        lowerCamelCase_ =os.listdir(lowerCAmelCase )
        lowerCamelCase_ =['''alphabet.json''', '''language_model''']
        downloaded_decoder_files.sort()
        expected_decoder_files.sort()
        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(lowerCAmelCase, lowerCAmelCase )

    def lowercase__ ( self ):
        """Loading from a local snapshot_download dir must yield the same decoder
        files as loading straight from the hub cache."""
        lowerCamelCase_ =snapshot_download('''hf-internal-testing/processor_with_lm''' )
        lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained(lowerCAmelCase )
        lowerCamelCase_ =processor.decoder.model_container[processor.decoder._model_key]
        lowerCamelCase_ =Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
        lowerCamelCase_ =os.listdir(lowerCAmelCase )
        lowerCamelCase_ =os.listdir(lowerCAmelCase )
        local_decoder_files.sort()
        expected_decoder_files.sort()
        # test that both decoder form hub and local files in cache are the same
        self.assertListEqual(lowerCAmelCase, lowerCAmelCase )

    def lowercase__ ( self ):
        """AutoProcessor must resolve to an equivalent processor: same features
        on audio input and same batch_decode texts."""
        lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        lowerCamelCase_ =AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        lowerCamelCase_ =floats_list((3, 1_000) )
        lowerCamelCase_ =processor_wavaveca(lowerCAmelCase, return_tensors='''np''' )
        lowerCamelCase_ =processor_auto(lowerCAmelCase, return_tensors='''np''' )
        for key in input_wavaveca.keys():
            self.assertAlmostEqual(input_wavaveca[key].sum(), input_auto[key].sum(), delta=1e-2 )
        lowerCamelCase_ =self._get_dummy_logits()
        lowerCamelCase_ =processor_wavaveca.batch_decode(lowerCAmelCase )
        lowerCamelCase_ =processor_auto.batch_decode(lowerCAmelCase )
        self.assertListEqual(decoded_wavaveca.text, decoded_auto.text )

    def lowercase__ ( self ):
        """processor.model_input_names must mirror the feature extractor's."""
        lowerCamelCase_ =self.get_feature_extractor()
        lowerCamelCase_ =self.get_tokenizer()
        lowerCamelCase_ =self.get_decoder()
        lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
        self.assertListEqual(
            processor.model_input_names, feature_extractor.model_input_names, msg='''`processor` and `feature_extractor` model input names do not match''', )

    @staticmethod
    def lowercase__ ( lowerCAmelCase, lowerCAmelCase ):
        """Collect ``d[key]`` from every offset dict (original params appear to
        have been ``offsets`` and ``key`` judging by the body)."""
        lowerCamelCase_ =[d[key] for d in offsets]
        return retrieved_list

    def lowercase__ ( self ):
        """decode(output_word_offsets=True) must return text plus per-word
        start/end offsets with the expected values for the dummy logits."""
        lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        lowerCamelCase_ =self._get_dummy_logits()[0]
        lowerCamelCase_ =processor.decode(lowerCAmelCase, output_word_offsets=lowerCAmelCase )
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys() ), 4 )
        self.assertTrue('''text''' in outputs )
        self.assertTrue('''word_offsets''' in outputs )
        self.assertTrue(isinstance(lowerCAmelCase, lowerCAmelCase ) )
        self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''], '''word''' ) ), outputs.text )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''word''' ), ['''<s>''', '''<s>''', '''</s>'''] )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''start_offset''' ), [0, 2, 4] )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''end_offset''' ), [1, 3, 5] )

    def lowercase__ ( self ):
        """Batched variant of the word-offset test above."""
        lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        lowerCamelCase_ =self._get_dummy_logits()
        lowerCamelCase_ =processor.batch_decode(lowerCAmelCase, output_word_offsets=lowerCAmelCase )
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys() ), 4 )
        self.assertTrue('''text''' in outputs )
        self.assertTrue('''word_offsets''' in outputs )
        self.assertTrue(isinstance(lowerCAmelCase, lowerCAmelCase ) )
        self.assertListEqual(
            [''' '''.join(self.get_from_offsets(lowerCAmelCase, '''word''' ) ) for o in outputs['''word_offsets''']], outputs.text )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''word''' ), ['''<s>''', '''<s>''', '''</s>'''] )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''start_offset''' ), [0, 2, 4] )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''end_offset''' ), [1, 3, 5] )

    @slow
    @require_torch
    @require_torchaudio
    def lowercase__ ( self ):
        """End-to-end integration test: run a real Common Voice sample through a
        wav2vec2 + LM checkpoint and check the transcription and word timings."""
        import torch

        lowerCamelCase_ =load_dataset('''common_voice''', '''en''', split='''train''', streaming=lowerCAmelCase )
        lowerCamelCase_ =ds.cast_column('''audio''', datasets.Audio(sampling_rate=16_000 ) )
        lowerCamelCase_ =iter(lowerCAmelCase )
        lowerCamelCase_ =next(lowerCAmelCase )
        lowerCamelCase_ =AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
        lowerCamelCase_ =WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        lowerCamelCase_ =processor(sample['''audio''']['''array'''], return_tensors='''pt''' ).input_values
        with torch.no_grad():
            lowerCamelCase_ =model(lowerCAmelCase ).logits.cpu().numpy()
        lowerCamelCase_ =processor.decode(logits[0], output_word_offsets=lowerCAmelCase )
        # frames-to-seconds conversion factor for the offsets below
        lowerCamelCase_ =model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        lowerCamelCase_ =[
            {
                '''start_time''': d['''start_offset'''] * time_offset,
                '''end_time''': d['''end_offset'''] * time_offset,
                '''word''': d['''word'''],
            }
            for d in output['''word_offsets''']
        ]
        lowerCamelCase_ ='''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
        # output words
        self.assertEqual(''' '''.join(self.get_from_offsets(lowerCAmelCase, '''word''' ) ), lowerCAmelCase )
        self.assertEqual(''' '''.join(self.get_from_offsets(lowerCAmelCase, '''word''' ) ), output.text )
        # output times
        lowerCamelCase_ =torch.tensor(self.get_from_offsets(lowerCAmelCase, '''start_time''' ) )
        lowerCamelCase_ =torch.tensor(self.get_from_offsets(lowerCAmelCase, '''end_time''' ) )
        # fmt: off
        lowerCamelCase_ =torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
        lowerCamelCase_ =torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
        # fmt: on
        self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=0.0_1 ) )
        self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=0.0_1 ) )
# --- extraction residue between concatenated files (original tokens: "75", "0") ---
"""Tests for the UnCLIP (Karlo) image-variation diffusion pipeline.

NOTE(review): this chunk has been mechanically obfuscated: both classes are
named ``lowerCAmelCase_`` (the second shadows the first at module level), every
method is named ``snake_case`` (later defs shadow earlier ones -- the original
names such as ``get_dummy_components`` / ``get_dummy_inputs`` survive only in
call sites), most locals were replaced by the placeholder ``snake_case`` and
most arguments by ``lowerCAmelCase``.  ``UNetaDModel`` stands for
``UNet2DModel``, ``floataa`` presumably for ``float16``.  The code documents
intent but is NOT runnable as written; comments below describe only what the
visible calls and assertions establish.
"""
import gc
import random
import unittest

import numpy as np
import torch
from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModelWithProjection,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import (
    DiffusionPipeline,
    UnCLIPImageVariationPipeline,
    UnCLIPScheduler,
    UNetaDConditionModel,
    UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps

from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class lowerCAmelCase_ ( lowerCamelCase__ , unittest.TestCase ):
    """Fast (CPU, tiny-model) tests for UnCLIPImageVariationPipeline.

    The base class ``lowerCamelCase__`` is obfuscated; judging by the imports
    and the ``_test_*`` helpers called below it is presumably
    ``PipelineTesterMixin`` -- TODO confirm against the original file.
    """

    # pipeline under test and the parameter sets the tester mixin exercises
    _lowerCAmelCase : str = UnCLIPImageVariationPipeline
    _lowerCAmelCase : Optional[Any] = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
    _lowerCAmelCase : List[str] = IMAGE_VARIATION_BATCH_PARAMS
    # kwargs copied verbatim to every item of a batched input
    _lowerCAmelCase : Optional[int] = [
        'generator',
        'return_dict',
        'decoder_num_inference_steps',
        'super_res_num_inference_steps',
    ]
    _lowerCAmelCase : int = False

    # NOTE(review): the original property names are lost; reads such as
    # ``self.text_embedder_hidden_size`` / ``self.time_input_dim`` /
    # ``self.cross_attention_dim`` below no longer resolve to these defs.

    @property
    def snake_case ( self ):
        """Tiny dimension constant (32) used by the dummy configs below."""
        return 32

    @property
    def snake_case ( self ):
        """Tiny dimension constant (32)."""
        return 32

    @property
    def snake_case ( self ):
        """Alias of the time-input dimension."""
        return self.time_input_dim

    @property
    def snake_case ( self ):
        """Time-embedding dim: 4x the time-input dim."""
        return self.time_input_dim * 4

    @property
    def snake_case ( self ):
        """Constant 100 (purpose lost to obfuscation -- TODO confirm)."""
        return 1_00

    @property
    def snake_case ( self ):
        """Tiny CLIP tokenizer from the hub test repo."""
        snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        return tokenizer

    @property
    def snake_case ( self ):
        """Seeded tiny CLIP text encoder with projection."""
        torch.manual_seed(0 )
        snake_case = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        return CLIPTextModelWithProjection(lowerCAmelCase )

    @property
    def snake_case ( self ):
        """Seeded tiny CLIP vision encoder with projection (the image encoder)."""
        torch.manual_seed(0 )
        snake_case = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
        return CLIPVisionModelWithProjection(lowerCAmelCase )

    @property
    def snake_case ( self ):
        """Seeded UnCLIP text-projection module."""
        torch.manual_seed(0 )
        snake_case = {
            'clip_embeddings_dim': self.text_embedder_hidden_size,
            'time_embed_dim': self.time_embed_dim,
            'cross_attention_dim': self.cross_attention_dim,
        }
        snake_case = UnCLIPTextProjModel(**lowerCAmelCase )
        return model

    @property
    def snake_case ( self ):
        """Seeded tiny conditional UNet used as the UnCLIP decoder."""
        torch.manual_seed(0 )
        snake_case = {
            'sample_size': 32,
            # RGB in channels
            'in_channels': 3,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 6,
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': 'identity',
        }
        snake_case = UNetaDConditionModel(**lowerCAmelCase )
        return model

    @property
    def snake_case ( self ):
        """Shared kwargs for the two super-resolution UNets below."""
        return {
            "sample_size": 64,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "in_channels": 6,
            "out_channels": 3,
        }

    @property
    def snake_case ( self ):
        """Super-res UNet #1 (seed 0)."""
        torch.manual_seed(0 )
        snake_case = UNetaDModel(**self.dummy_super_res_kwargs )
        return model

    @property
    def snake_case ( self ):
        # seeded differently from the first one
        """Super-res UNet #2 (seed 1)."""
        torch.manual_seed(1 )
        snake_case = UNetaDModel(**self.dummy_super_res_kwargs )
        return model

    def snake_case ( self ):
        """Assemble the full component dict the pipeline constructor expects
        (originally ``get_dummy_components`` judging by call sites)."""
        snake_case = self.dummy_decoder
        snake_case = self.dummy_text_proj
        snake_case = self.dummy_text_encoder
        snake_case = self.dummy_tokenizer
        snake_case = self.dummy_super_res_first
        snake_case = self.dummy_super_res_last
        snake_case = UnCLIPScheduler(
            variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=10_00 , )
        snake_case = UnCLIPScheduler(
            variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=10_00 , )
        snake_case = CLIPImageProcessor(crop_size=32 , size=32 )
        snake_case = self.dummy_image_encoder
        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }

    # NOTE(review): obfuscation collapsed three distinct parameter names into
    # ``lowerCAmelCase`` (originally, presumably: device, seed=0, pil_image=True)
    # -- invalid as written.
    def snake_case ( self , lowerCAmelCase , lowerCAmelCase=0 , lowerCAmelCase=True ):
        """Build a 1x3x32x32 input image (optionally converted to PIL), a seeded
        generator and the fast-inference kwargs (originally ``get_dummy_inputs``)."""
        snake_case = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
        if str(lowerCAmelCase ).startswith('mps' ):
            # mps does not support device-bound generators
            snake_case = torch.manual_seed(lowerCAmelCase )
        else:
            snake_case = torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
        if pil_image:
            # map from [-1, 1] to [0, 1], then to HWC numpy and finally PIL
            snake_case = input_image * 0.5 + 0.5
            snake_case = input_image.clamp(0 , 1 )
            snake_case = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            snake_case = DiffusionPipeline.numpy_to_pil(lowerCAmelCase )[0]
        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }

    def snake_case ( self ):
        """Run the pipeline on CPU and pin a 3x3 corner slice of the 64x64 output,
        for both dict and tuple return paths."""
        snake_case = 'cpu'
        snake_case = self.get_dummy_components()
        snake_case = self.pipeline_class(**lowerCAmelCase )
        snake_case = pipe.to(lowerCAmelCase )
        pipe.set_progress_bar_config(disable=lowerCAmelCase )
        snake_case = self.get_dummy_inputs(lowerCAmelCase , pil_image=lowerCAmelCase )
        snake_case = pipe(**lowerCAmelCase )
        snake_case = output.images
        snake_case = self.get_dummy_inputs(lowerCAmelCase , pil_image=lowerCAmelCase )
        snake_case = pipe(
            **lowerCAmelCase , return_dict=lowerCAmelCase , )[0]
        snake_case = image[0, -3:, -3:, -1]
        snake_case = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        snake_case = np.array(
            [
                0.99_97,
                0.00_02,
                0.99_97,
                0.99_97,
                0.99_69,
                0.00_23,
                0.99_97,
                0.99_69,
                0.99_70,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2

    def snake_case ( self ):
        """Same slice test as above with a different expected slice (presumably
        the non-PIL input path -- the pil_image flag is obfuscated)."""
        snake_case = 'cpu'
        snake_case = self.get_dummy_components()
        snake_case = self.pipeline_class(**lowerCAmelCase )
        snake_case = pipe.to(lowerCAmelCase )
        pipe.set_progress_bar_config(disable=lowerCAmelCase )
        snake_case = self.get_dummy_inputs(lowerCAmelCase , pil_image=lowerCAmelCase )
        snake_case = pipe(**lowerCAmelCase )
        snake_case = output.images
        snake_case = self.get_dummy_inputs(lowerCAmelCase , pil_image=lowerCAmelCase )
        snake_case = pipe(
            **lowerCAmelCase , return_dict=lowerCAmelCase , )[0]
        snake_case = image[0, -3:, -3:, -1]
        snake_case = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        snake_case = np.array([0.99_97, 0.00_03, 0.99_97, 0.99_97, 0.99_70, 0.00_24, 0.99_97, 0.99_71, 0.99_71] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2

    def snake_case ( self ):
        """Batched (two-image list) variant of the slice test; output batch is 2."""
        snake_case = 'cpu'
        snake_case = self.get_dummy_components()
        snake_case = self.pipeline_class(**lowerCAmelCase )
        snake_case = pipe.to(lowerCAmelCase )
        pipe.set_progress_bar_config(disable=lowerCAmelCase )
        snake_case = self.get_dummy_inputs(lowerCAmelCase , pil_image=lowerCAmelCase )
        snake_case = [
            pipeline_inputs['image'],
            pipeline_inputs['image'],
        ]
        snake_case = pipe(**lowerCAmelCase )
        snake_case = output.images
        snake_case = self.get_dummy_inputs(lowerCAmelCase , pil_image=lowerCAmelCase )
        snake_case = [
            tuple_pipeline_inputs['image'],
            tuple_pipeline_inputs['image'],
        ]
        snake_case = pipe(
            **lowerCAmelCase , return_dict=lowerCAmelCase , )[0]
        snake_case = image[0, -3:, -3:, -1]
        snake_case = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (2, 64, 64, 3)
        snake_case = np.array(
            [
                0.99_97,
                0.99_89,
                0.00_08,
                0.00_21,
                0.99_60,
                0.00_18,
                0.00_14,
                0.00_02,
                0.99_33,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2

    def snake_case ( self ):
        """Passing a pre-computed image embedding (plus fixed latents) must give
        the same result as passing the raw image."""
        snake_case = torch.device('cpu' )

        class lowerCAmelCase_:
            # stand-in scheduler passed to prepare_latents below; the attribute
            # name is obfuscated (presumably ``init_noise_sigma = 1``)
            """simple docstring"""
            _lowerCAmelCase : Union[str, Any] = 1

        snake_case = self.get_dummy_components()
        snake_case = self.pipeline_class(**lowerCAmelCase )
        snake_case = pipe.to(lowerCAmelCase )
        pipe.set_progress_bar_config(disable=lowerCAmelCase )
        snake_case = torch.Generator(device=lowerCAmelCase ).manual_seed(0 )
        snake_case = pipe.decoder.dtype
        snake_case = 1
        # fixed decoder latents so both runs are deterministic and comparable
        snake_case = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        snake_case = pipe.prepare_latents(
            lowerCAmelCase , dtype=lowerCAmelCase , device=lowerCAmelCase , generator=lowerCAmelCase , latents=lowerCAmelCase , scheduler=DummyScheduler() )
        # fixed super-resolution latents (in_channels halved: noise half only)
        snake_case = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        snake_case = pipe.prepare_latents(
            lowerCAmelCase , dtype=lowerCAmelCase , device=lowerCAmelCase , generator=lowerCAmelCase , latents=lowerCAmelCase , scheduler=DummyScheduler() )
        snake_case = self.get_dummy_inputs(lowerCAmelCase , pil_image=lowerCAmelCase )
        snake_case = pipe(
            **lowerCAmelCase , decoder_latents=lowerCAmelCase , super_res_latents=lowerCAmelCase ).images
        snake_case = self.get_dummy_inputs(lowerCAmelCase , pil_image=lowerCAmelCase )
        # Don't pass image, instead pass embedding
        snake_case = pipeline_inputs.pop('image' )
        snake_case = pipe.image_encoder(lowerCAmelCase ).image_embeds
        snake_case = pipe(
            **lowerCAmelCase , decoder_latents=lowerCAmelCase , super_res_latents=lowerCAmelCase , image_embeddings=lowerCAmelCase , ).images
        # make sure passing text embeddings manually is identical
        assert np.abs(img_out_a - img_out_a ).max() < 1E-4

    @skip_mps
    def snake_case ( self ):
        """Attention-slicing forward pass must match the unsliced pass within 1e-2."""
        snake_case = torch_device == 'cpu'
        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        snake_case = 1E-2
        self._test_attention_slicing_forward_pass(
            test_max_difference=lowerCAmelCase , expected_max_diff=lowerCAmelCase )

    @skip_mps
    def snake_case ( self ):
        """Batch-of-one output must equal single-sample output (mixin helper)."""
        snake_case = torch_device == 'cpu'
        snake_case = True
        snake_case = [
            'decoder_num_inference_steps',
            'super_res_num_inference_steps',
        ]
        self._test_inference_batch_single_identical(
            test_max_difference=lowerCAmelCase , relax_max_difference=lowerCAmelCase , additional_params_copy_to_batched_inputs=lowerCAmelCase , )

    def snake_case ( self ):
        """Outputs must be consistent across batch sizes (mixin helper);
        restricted to batch sizes [2, 3] on mps."""
        snake_case = [
            'decoder_num_inference_steps',
            'super_res_num_inference_steps',
        ]
        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            snake_case = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=lowerCAmelCase , additional_params_copy_to_batched_inputs=lowerCAmelCase , )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=lowerCAmelCase )

    @skip_mps
    def snake_case ( self ):
        """Delegate to the mixin's dict/tuple output-equivalence test (skipped on mps)."""
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def snake_case ( self ):
        """Delegate to the mixin's local save/load round-trip test (skipped on mps)."""
        return super().test_save_load_local()

    @skip_mps
    def snake_case ( self ):
        """Delegate to the mixin's optional-components save/load test (skipped on mps)."""
        return super().test_save_load_optional_components()


@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
    """Slow GPU integration test against the kakaobrain/karlo checkpoint.

    NOTE(review): shadows the fast-test class above -- both were renamed
    ``lowerCAmelCase_`` by the obfuscation pass.
    """

    def snake_case ( self ):
        """Free GPU memory after each test (tearDown-style)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def snake_case ( self ):
        """Run the real pipeline on the reference cat image and compare against
        the stored fp16 reference output within a mean-pixel tolerance of 15."""
        snake_case = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' )
        snake_case = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/unclip/karlo_v1_alpha_cat_variation_fp16.npy' )
        snake_case = UnCLIPImageVariationPipeline.from_pretrained(
            'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.floataa )
        snake_case = pipeline.to(lowerCAmelCase )
        pipeline.set_progress_bar_config(disable=lowerCAmelCase )
        snake_case = torch.Generator(device='cpu' ).manual_seed(0 )
        snake_case = pipeline(
            lowerCAmelCase , generator=lowerCAmelCase , output_type='np' , )
        snake_case = output.images[0]
        assert image.shape == (2_56, 2_56, 3)
        assert_mean_pixel_difference(lowerCAmelCase , lowerCAmelCase , 15 )
# --- extraction residue between concatenated files (original token: "150") ---
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase : List[Any] =StableDiffusionInstructPixaPixPipeline lowercase : List[Any] =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'} lowercase : Optional[Any] =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS lowercase : Union[str, Any] =IMAGE_TO_IMAGE_IMAGE_PARAMS lowercase : List[Any] =IMAGE_TO_IMAGE_IMAGE_PARAMS def lowercase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCamelCase_ =UNetaDConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=32, ) lowerCamelCase_ =PNDMScheduler(skip_prk_steps=lowerCAmelCase ) torch.manual_seed(0 ) lowerCamelCase_ =AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], 
latent_channels=4, ) torch.manual_seed(0 ) lowerCamelCase_ =CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, ) lowerCamelCase_ =CLIPTextModel(lowerCAmelCase ) lowerCamelCase_ =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) lowerCamelCase_ ={ '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=0 ): """simple docstring""" lowerCamelCase_ =floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase ) lowerCamelCase_ =image.cpu().permute(0, 2, 3, 1 )[0] lowerCamelCase_ =Image.fromarray(np.uinta(lowerCAmelCase ) ).convert('''RGB''' ) if str(lowerCAmelCase ).startswith('''mps''' ): lowerCamelCase_ =torch.manual_seed(lowerCAmelCase ) else: lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase ) lowerCamelCase_ ={ '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''image_guidance_scale''': 1, '''output_type''': '''numpy''', } return inputs def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCamelCase_ =self.get_dummy_components() lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase ) lowerCamelCase_ =sd_pipe.to(lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase ) lowerCamelCase_ =sd_pipe(**lowerCAmelCase ).images lowerCamelCase_ =image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCamelCase_ =np.array([0.7_5_2_6, 0.3_7_5_0, 0.4_5_4_7, 
0.6_1_1_7, 0.5_8_6_6, 0.5_0_1_6, 0.4_3_2_7, 0.5_6_4_2, 0.4_8_1_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCamelCase_ =self.get_dummy_components() lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase ) lowerCamelCase_ =sd_pipe.to(lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase ) lowerCamelCase_ ='''french fries''' lowerCamelCase_ =sd_pipe(**lowerCAmelCase, negative_prompt=lowerCAmelCase ) lowerCamelCase_ =output.images lowerCamelCase_ =image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCamelCase_ =np.array([0.7_5_1_1, 0.3_6_4_2, 0.4_5_5_3, 0.6_2_3_6, 0.5_7_9_7, 0.5_0_1_3, 0.4_3_4_3, 0.5_6_1_1, 0.4_8_3_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCamelCase_ =self.get_dummy_components() lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase ) lowerCamelCase_ =sd_pipe.to(lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase ) lowerCamelCase_ =[inputs['''prompt''']] * 2 lowerCamelCase_ =np.array(inputs['''image'''] ).astype(np.floataa ) / 2_5_5.0 lowerCamelCase_ =torch.from_numpy(lowerCAmelCase ).unsqueeze(0 ).to(lowerCAmelCase ) lowerCamelCase_ =image / 2 + 0.5 lowerCamelCase_ =image.permute(0, 3, 1, 2 ) lowerCamelCase_ =image.repeat(2, 1, 1, 1 ) lowerCamelCase_ =sd_pipe(**lowerCAmelCase ).images lowerCamelCase_ =image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) lowerCamelCase_ =np.array([0.5_8_1_2, 0.5_7_4_8, 0.5_2_2_2, 0.5_9_0_8, 0.5_6_9_5, 0.7_1_7_4, 0.6_8_0_4, 0.5_5_2_3, 0.5_5_7_9] ) assert 
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCamelCase_ =self.get_dummy_components() lowerCamelCase_ =EulerAncestralDiscreteScheduler( beta_start=0.0_0_0_8_5, beta_end=0.0_1_2, beta_schedule='''scaled_linear''' ) lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase ) lowerCamelCase_ =sd_pipe.to(lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase ) lowerCamelCase_ =sd_pipe(**lowerCAmelCase ).images lowerCamelCase_ =image[0, -3:, -3:, -1] lowerCamelCase_ =[round(lowerCAmelCase, 4 ) for x in image_slice.flatten().tolist()] print(''','''.join([str(lowerCAmelCase ) for x in slice] ) ) assert image.shape == (1, 32, 32, 3) lowerCamelCase_ =np.array([0.7_4_1_7, 0.3_8_4_2, 0.4_7_3_2, 0.5_7_7_6, 0.5_8_9_1, 0.5_1_3_9, 0.4_0_5_2, 0.5_6_7_3, 0.4_9_8_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def lowercase__ ( self ): """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_dummy_components() lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase ) lowerCamelCase_ =VaeImageProcessor(do_resize=lowerCAmelCase, do_normalize=lowerCAmelCase ) lowerCamelCase_ =pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowerCamelCase_ =pipe(**self.get_dummy_inputs_by_type(lowerCAmelCase, input_image_type='''pt''' ) )[0] lowerCamelCase_ =components['''vae'''] lowerCamelCase_ =self.get_dummy_inputs_by_type(lowerCAmelCase, input_image_type='''pt''' ) for image_param in self.image_latents_params: if image_param in inputs.keys(): lowerCamelCase_ =vae.encode(inputs[image_param] ).latent_dist.mode() lowerCamelCase_ =pipe(**lowerCAmelCase )[0] lowerCamelCase_ 
=np.abs(out - out_latents_inputs ).max() self.assertLess(lowerCAmelCase, 1e-4, '''passing latents as image input generate different result from passing image''' ) @slow @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): def lowercase__ ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self, lowerCAmelCase=0 ): """simple docstring""" lowerCamelCase_ =torch.manual_seed(lowerCAmelCase ) lowerCamelCase_ =load_image( '''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' ) lowerCamelCase_ ={ '''prompt''': '''turn him into a cyborg''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''image_guidance_scale''': 1.0, '''output_type''': '''numpy''', } return inputs def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''', safety_checker=lowerCAmelCase ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) pipe.enable_attention_slicing() lowerCamelCase_ =self.get_inputs() lowerCamelCase_ =pipe(**lowerCAmelCase ).images lowerCamelCase_ =image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) lowerCamelCase_ =np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''', safety_checker=lowerCAmelCase ) lowerCamelCase_ =LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) pipe.enable_attention_slicing() lowerCamelCase_ =self.get_inputs() lowerCamelCase_ =pipe(**lowerCAmelCase ).images lowerCamelCase_ =image[0, -3:, -3:, 
-1].flatten() assert image.shape == (1, 512, 512, 3) lowerCamelCase_ =np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''', safety_checker=lowerCAmelCase ) lowerCamelCase_ =DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) pipe.enable_attention_slicing() lowerCamelCase_ =self.get_inputs() lowerCamelCase_ =pipe(**lowerCAmelCase ).images lowerCamelCase_ =image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) lowerCamelCase_ =np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =0 def callback_fn(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) -> None: lowerCamelCase_ =True nonlocal number_of_steps number_of_steps += 1 if step == 1: lowerCamelCase_ =latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) lowerCamelCase_ =latents[0, -3:, -3:, -1] lowerCamelCase_ =np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 elif step == 2: lowerCamelCase_ =latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) lowerCamelCase_ =latents[0, -3:, -3:, -1] lowerCamelCase_ =np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 lowerCamelCase_ =False lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''', 
safety_checker=lowerCAmelCase, torch_dtype=torch.floataa ) lowerCamelCase_ =pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) pipe.enable_attention_slicing() lowerCamelCase_ =self.get_inputs() pipe(**lowerCAmelCase, callback=lowerCAmelCase, callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def lowercase__ ( self ): """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''', safety_checker=lowerCAmelCase, torch_dtype=torch.floataa ) lowerCamelCase_ =pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() lowerCamelCase_ =self.get_inputs() lowerCamelCase_ =pipe(**lowerCAmelCase ) lowerCamelCase_ =torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 lowerCamelCase_ =inputs['''image'''].resize((504, 504) ) lowerCamelCase_ ='''timbrooks/instruct-pix2pix''' lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained( lowerCAmelCase, safety_checker=lowerCAmelCase, ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) pipe.enable_attention_slicing() lowerCamelCase_ =pipe(**lowerCAmelCase ) lowerCamelCase_ =output.images[0] lowerCamelCase_ =image[255:258, 383:386, -1] assert image.shape == (504, 504, 3) lowerCamelCase_ =np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
75
0
"""simple docstring""" from collections.abc import Callable import numpy as np def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ): _lowerCamelCase : Optional[int] = int(np.ceil((x_end - xa) / step_size ) ) _lowerCamelCase : Optional[Any] = np.zeros((n + 1,) ) _lowerCamelCase : List[Any] = ya _lowerCamelCase : List[Any] = xa for k in range(__snake_case ): _lowerCamelCase : Union[str, Any] = y[k] + step_size * ode_func(__snake_case , y[k] ) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
96
'''simple docstring''' from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class __UpperCamelCase : lowercase : Union[str, Any] =XGLMConfig lowercase : Optional[Any] ={} lowercase : Optional[int] ='gelu' def __init__( self, lowerCAmelCase, lowerCAmelCase=14, lowerCAmelCase=7, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=99, lowerCAmelCase=32, lowerCAmelCase=2, lowerCAmelCase=4, lowerCAmelCase=37, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=0.0_2, ): """simple docstring""" lowerCamelCase_ =parent lowerCamelCase_ =batch_size lowerCamelCase_ =seq_length lowerCamelCase_ =is_training lowerCamelCase_ =use_input_mask lowerCamelCase_ =use_labels lowerCamelCase_ =vocab_size lowerCamelCase_ =d_model lowerCamelCase_ =num_hidden_layers lowerCamelCase_ =num_attention_heads lowerCamelCase_ =ffn_dim lowerCamelCase_ =activation_function lowerCamelCase_ =activation_dropout lowerCamelCase_ =attention_dropout lowerCamelCase_ =max_position_embeddings lowerCamelCase_ =initializer_range lowerCamelCase_ =None lowerCamelCase_ =0 lowerCamelCase_ =2 lowerCamelCase_ =1 def lowercase__ ( self ): """simple docstring""" return XGLMConfig.from_pretrained('''facebook/xglm-564M''' ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length], self.vocab_size ), clip_value_min=0, clip_value_max=3 ) lowerCamelCase_ =None if 
self.use_input_mask: lowerCamelCase_ =random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ =self.get_config() lowerCamelCase_ =floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2 ) return ( config, input_ids, input_mask, head_mask, ) def lowercase__ ( self ): """simple docstring""" return XGLMConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=lowerCAmelCase, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=lowerCAmelCase, ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.prepare_config_and_inputs() ( ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ) =config_and_inputs lowerCamelCase_ ={ '''input_ids''': input_ids, '''head_mask''': head_mask, } return config, inputs_dict @require_tf class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase : int =(TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () lowercase : Optional[Any] =(TFXGLMForCausalLM,) if is_tf_available() else () lowercase : Tuple =( {'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {} ) lowercase : Optional[Any] =False lowercase : Optional[Any] =False lowercase : Optional[int] =False def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =TFXGLMModelTester(self ) lowerCamelCase_ =ConfigTester(self, config_class=lowerCAmelCase, n_embd=37 ) def lowercase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() @slow def lowercase__ ( self ): """simple docstring""" for model_name in 
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ =TFXGLMModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) @unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' ) def lowercase__ ( self ): """simple docstring""" super().test_resize_token_embeddings() @require_tf class __UpperCamelCase ( unittest.TestCase ): @slow def lowercase__ ( self, lowerCAmelCase=True ): """simple docstring""" lowerCamelCase_ =TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' ) lowerCamelCase_ =tf.convert_to_tensor([[2, 268, 9_865]], dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off lowerCamelCase_ =[2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581] # fmt: on lowerCamelCase_ =model.generate(lowerCAmelCase, do_sample=lowerCAmelCase, num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist(), lowerCAmelCase ) @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' ) lowerCamelCase_ =TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' ) tf.random.set_seed(0 ) lowerCamelCase_ =tokenizer('''Today is a nice day and''', return_tensors='''tf''' ) lowerCamelCase_ =tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(''':/CPU:0''' ): lowerCamelCase_ =model.generate(lowerCAmelCase, do_sample=lowerCAmelCase, seed=[7, 0] ) lowerCamelCase_ =tokenizer.decode(output_ids[0], skip_special_tokens=lowerCAmelCase ) lowerCamelCase_ =( '''Today is a nice day and warm evening here over Southern Alberta!! 
Today when they closed schools due''' ) self.assertEqual(lowerCAmelCase, lowerCAmelCase ) @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' ) lowerCamelCase_ =XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' ) lowerCamelCase_ ='''left''' # use different length sentences to test batching lowerCamelCase_ =[ '''This is an extremelly long sentence that only exists to test the ability of the model to cope with ''' '''left-padding, such as in batched generation. The output for the sequence below should be the same ''' '''regardless of whether left padding is applied or not. When''', '''Hello, my dog is a little''', ] lowerCamelCase_ =tokenizer(lowerCAmelCase, return_tensors='''tf''', padding=lowerCAmelCase ) lowerCamelCase_ =inputs['''input_ids'''] lowerCamelCase_ =model.generate(input_ids=lowerCAmelCase, attention_mask=inputs['''attention_mask'''], max_new_tokens=12 ) lowerCamelCase_ =tokenizer(sentences[0], return_tensors='''tf''' ).input_ids lowerCamelCase_ =model.generate(input_ids=lowerCAmelCase, max_new_tokens=12 ) lowerCamelCase_ =tokenizer(sentences[1], return_tensors='''tf''' ).input_ids lowerCamelCase_ =model.generate(input_ids=lowerCAmelCase, max_new_tokens=12 ) lowerCamelCase_ =tokenizer.batch_decode(lowerCAmelCase, skip_special_tokens=lowerCAmelCase ) lowerCamelCase_ =tokenizer.decode(output_non_padded[0], skip_special_tokens=lowerCAmelCase ) lowerCamelCase_ =tokenizer.decode(output_padded[0], skip_special_tokens=lowerCAmelCase ) lowerCamelCase_ =[ '''This is an extremelly long sentence that only exists to test the ability of the model to cope with ''' '''left-padding, such as in batched generation. The output for the sequence below should be the same ''' '''regardless of whether left padding is applied or not. 
When left padding is applied, the sequence will be ''' '''a single''', '''Hello, my dog is a little bit of a shy one, but he is very friendly''', ] self.assertListEqual(lowerCAmelCase, lowerCAmelCase ) self.assertListEqual(lowerCAmelCase, [non_padded_sentence, padded_sentence] )
75
0
"""Newton-Raphson root finding for symbolic expressions (via SymPy)."""
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of ``function`` starting from ``starting_point``.

    Fixes over the mangled original: the parameters were all named identically
    (a SyntaxError), the body referenced an undefined name, and the function
    was defined as ``_a`` while the ``__main__`` demo called ``newton_raphson``
    (a NameError). ``_a`` is kept below as a backward-compatible alias.

    Args:
        function: expression whose root is sought, e.g. ``"sin(x)"``, written
            in terms of ``variable``.
        starting_point: initial guess (may be complex).
        variable: name of the free symbol appearing in ``function``.
        precision: stop once successive guesses differ by less than this.
        multiplicity: multiplicity of the sought root; values > 1 restore
            quadratic convergence for repeated roots.

    Returns:
        The approximated root.

    Raises:
        ZeroDivisionError: if the derivative vanishes at the current guess.
    """
    sym = symbols(variable)
    func = lambdify(sym, function)
    diff_function = lambdify(sym, diff(function, sym))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess


# Backward-compatible alias for the original (mangled) public name.
_a = newton_raphson


if __name__ == "__main__":
    # Find root of trigonometric function (value of pi)
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find fourth root of 5
    print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 + 5j)}")
    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f"{newton_raphson('log(y) - 1', 2, variable='y')}",
    )
    # Exponential roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f"{newton_raphson('exp(x) - 1', 10, precision=0.005)}",
    )
    # NOTE(review): cos(x) at x=0 has zero derivative, so this demo line raises
    # ZeroDivisionError by construction — present in the original as well.
    print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
347
'''simple docstring''' import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __UpperCamelCase : @staticmethod def lowercase__ ( *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" pass @is_pipeline_test @require_vision @require_torch class __UpperCamelCase ( unittest.TestCase ): lowercase : int =MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =pipeline( '''zero-shot-object-detection''', model='''hf-internal-testing/tiny-random-owlvit-object-detection''' ) lowerCamelCase_ =[ { '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''', '''candidate_labels''': ['''cat''', '''remote''', '''couch'''], } ] return object_detector, examples def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =object_detector(examples[0], threshold=0.0 ) lowerCamelCase_ =len(lowerCAmelCase ) self.assertGreater(lowerCAmelCase, 0 ) self.assertEqual( lowerCAmelCase, [ { '''score''': ANY(lowerCAmelCase ), '''label''': ANY(lowerCAmelCase ), '''box''': {'''xmin''': ANY(lowerCAmelCase ), '''ymin''': ANY(lowerCAmelCase ), '''xmax''': ANY(lowerCAmelCase ), '''ymax''': ANY(lowerCAmelCase )}, } for i in range(lowerCAmelCase ) ], ) @require_tf @unittest.skip('''Zero Shot Object Detection not implemented in TF''' ) def lowercase__ ( self ): """simple docstring""" pass @require_torch def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =pipeline( '''zero-shot-object-detection''', model='''hf-internal-testing/tiny-random-owlvit-object-detection''' ) lowerCamelCase_ =object_detector( 
'''./tests/fixtures/tests_samples/COCO/000000039769.png''', candidate_labels=['''cat''', '''remote''', '''couch'''], threshold=0.6_4, ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=4 ), [ {'''score''': 0.7_2_3_5, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}}, {'''score''': 0.7_2_1_8, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}}, {'''score''': 0.7_1_8_4, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}}, {'''score''': 0.6_7_4_8, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}}, {'''score''': 0.6_6_5_6, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}}, {'''score''': 0.6_6_1_4, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}}, {'''score''': 0.6_4_5_6, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}}, {'''score''': 0.6_4_2, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}}, {'''score''': 0.6_4_1_9, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}}, ], ) lowerCamelCase_ =object_detector( [ { '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''', '''candidate_labels''': ['''cat''', '''remote''', '''couch'''], } ], threshold=0.6_4, ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=4 ), [ [ {'''score''': 0.7_2_3_5, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}}, {'''score''': 0.7_2_1_8, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}}, {'''score''': 0.7_1_8_4, '''label''': '''couch''', '''box''': 
{'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}}, {'''score''': 0.6_7_4_8, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}}, {'''score''': 0.6_6_5_6, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}}, {'''score''': 0.6_6_1_4, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}}, {'''score''': 0.6_4_5_6, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}}, {'''score''': 0.6_4_2, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}}, {'''score''': 0.6_4_1_9, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}}, ] ], ) @require_torch @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =pipeline('''zero-shot-object-detection''' ) lowerCamelCase_ =object_detector( '''http://images.cocodataset.org/val2017/000000039769.jpg''', candidate_labels=['''cat''', '''remote''', '''couch'''], ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=4 ), [ {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}}, {'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}}, {'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}}, {'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}}, ], ) lowerCamelCase_ =object_detector( [ { '''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''', 
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''], }, { '''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''', '''candidate_labels''': ['''cat''', '''remote''', '''couch'''], }, ], ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=4 ), [ [ {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}}, {'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}}, {'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}}, {'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}}, ], [ {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}}, {'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}}, {'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}}, {'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}}, ], ], ) @require_tf @unittest.skip('''Zero Shot Object Detection not implemented in TF''' ) def lowercase__ ( self ): """simple docstring""" pass @require_torch @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =0.2 lowerCamelCase_ =pipeline('''zero-shot-object-detection''' ) lowerCamelCase_ =object_detector( '''http://images.cocodataset.org/val2017/000000039769.jpg''', 
candidate_labels=['''cat''', '''remote''', '''couch'''], threshold=lowerCAmelCase, ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=4 ), [ {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}}, {'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}}, ], ) @require_torch @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =2 lowerCamelCase_ =pipeline('''zero-shot-object-detection''' ) lowerCamelCase_ =object_detector( '''http://images.cocodataset.org/val2017/000000039769.jpg''', candidate_labels=['''cat''', '''remote''', '''couch'''], top_k=lowerCAmelCase, ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=4 ), [ {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}}, ], )
75
0
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging __a = logging.get_logger(__name__) if is_vision_available(): import PIL class lowerCamelCase ( lowerCamelCase__ ): '''simple docstring''' _A : Tuple = ['pixel_values'] def __init__( self: int , snake_case: Tuple = True , snake_case: List[Any] = None , snake_case: Union[str, Any] = PILImageResampling.BICUBIC , snake_case: Union[str, Any] = True , snake_case: Tuple = None , snake_case: Any = True , snake_case: Union[str, Any] = 1 / 255 , snake_case: Optional[int] = True , snake_case: int = None , snake_case: List[str] = None , snake_case: Optional[int] = True , **snake_case: str , ) -> List[Any]: super().__init__(**snake_case ) snake_case_ :Tuple = size if size is not None else {"""shortest_edge""": 224} snake_case_ :Dict = get_size_dict(snake_case , default_to_square=snake_case ) snake_case_ :Optional[Any] = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} snake_case_ :Tuple = get_size_dict(snake_case , default_to_square=snake_case , param_name="""crop_size""" ) snake_case_ :Optional[Any] = do_resize snake_case_ :Tuple = size snake_case_ :int = resample snake_case_ :Optional[int] = do_center_crop snake_case_ :Optional[Any] = crop_size snake_case_ :List[str] = do_rescale snake_case_ :List[str] = rescale_factor snake_case_ :Optional[Any] = do_normalize snake_case_ :Dict = image_mean if image_mean is not None else OPENAI_CLIP_MEAN snake_case_ :List[str] = image_std if image_std is not None else 
OPENAI_CLIP_STD snake_case_ :int = do_convert_rgb def lowerCAmelCase_ ( self: Optional[int] , snake_case: List[str] , snake_case: Optional[Any] , snake_case: List[Any] = PILImageResampling.BICUBIC , snake_case: Optional[Any] = None , **snake_case: Dict , ) -> Dict: snake_case_ :int = get_size_dict(snake_case , default_to_square=snake_case ) if "shortest_edge" not in size: raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) snake_case_ :Tuple = get_resize_output_image_size(snake_case , size=size["""shortest_edge"""] , default_to_square=snake_case ) return resize(snake_case , size=snake_case , resample=snake_case , data_format=snake_case , **snake_case ) def lowerCAmelCase_ ( self: Tuple , snake_case: int , snake_case: Union[str, Any] , snake_case: Union[str, Any] = None , **snake_case: Optional[Any] , ) -> str: snake_case_ :List[str] = get_size_dict(snake_case ) if "height" not in size or "width" not in size: raise ValueError(f"""The `size` parameter must contain the keys (height, width). 
Got {size.keys()}""" ) return center_crop(snake_case , size=(size["""height"""], size["""width"""]) , data_format=snake_case , **snake_case ) def lowerCAmelCase_ ( self: str , snake_case: int , snake_case: Optional[Any] , snake_case: List[Any] = None , **snake_case: Union[str, Any] , ) -> str: return rescale(snake_case , scale=snake_case , data_format=snake_case , **snake_case ) def lowerCAmelCase_ ( self: Union[str, Any] , snake_case: Dict , snake_case: Optional[Any] , snake_case: Optional[Any] , snake_case: int = None , **snake_case: Union[str, Any] , ) -> Any: return normalize(snake_case , mean=snake_case , std=snake_case , data_format=snake_case , **snake_case ) def lowerCAmelCase_ ( self: Union[str, Any] , snake_case: Any , snake_case: Tuple = None , snake_case: Optional[int] = None , snake_case: Union[str, Any] = None , snake_case: Any = None , snake_case: int = None , snake_case: Optional[int] = None , snake_case: List[Any] = None , snake_case: int = None , snake_case: Optional[Any] = None , snake_case: Union[str, Any] = None , snake_case: str = None , snake_case: List[str] = None , snake_case: List[str] = ChannelDimension.FIRST , **snake_case: Dict , ) -> Tuple: snake_case_ :int = do_resize if do_resize is not None else self.do_resize snake_case_ :int = size if size is not None else self.size snake_case_ :Tuple = get_size_dict(snake_case , param_name="""size""" , default_to_square=snake_case ) snake_case_ :Tuple = resample if resample is not None else self.resample snake_case_ :str = do_center_crop if do_center_crop is not None else self.do_center_crop snake_case_ :Any = crop_size if crop_size is not None else self.crop_size snake_case_ :List[Any] = get_size_dict(snake_case , param_name="""crop_size""" , default_to_square=snake_case ) snake_case_ :Dict = do_rescale if do_rescale is not None else self.do_rescale snake_case_ :int = rescale_factor if rescale_factor is not None else self.rescale_factor snake_case_ :Optional[int] = do_normalize if do_normalize 
is not None else self.do_normalize snake_case_ :Union[str, Any] = image_mean if image_mean is not None else self.image_mean snake_case_ :Optional[Any] = image_std if image_std is not None else self.image_std snake_case_ :int = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb snake_case_ :Optional[Any] = make_list_of_images(snake_case ) if not valid_images(snake_case ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: snake_case_ :List[Any] = [convert_to_rgb(snake_case ) for image in images] # All transformations expect numpy arrays. snake_case_ :Union[str, Any] = [to_numpy_array(snake_case ) for image in images] if do_resize: snake_case_ :Union[str, Any] = [self.resize(image=snake_case , size=snake_case , resample=snake_case ) for image in images] if do_center_crop: snake_case_ :Tuple = [self.center_crop(image=snake_case , size=snake_case ) for image in images] if do_rescale: snake_case_ :Dict = [self.rescale(image=snake_case , scale=snake_case ) for image in images] if do_normalize: snake_case_ :Union[str, Any] = [self.normalize(image=snake_case , mean=snake_case , std=snake_case ) for image in images] snake_case_ :str = [to_channel_dimension_format(snake_case , snake_case ) for image in images] snake_case_ :List[Any] = {"""pixel_values""": images} return BatchFeature(data=snake_case , tensor_type=snake_case )
66
"""Tokenizer for BlenderbotSmall: a lowercasing, word-level BPE tokenizer.

Reconstructed from obfuscated source: the original had duplicate parameter
names (a SyntaxError), a broken sort lambda, and a name collision between
the module-level helper function and the URL-map constants.
"""
import json
import os
from typing import Dict, List, Optional, Tuple

import regex as re

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word.

    Args:
        word: tuple of symbols (symbols are variable-length strings).

    Returns:
        Set of ``(prev_symbol, next_symbol)`` tuples.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return set(pairs)


class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    """Byte-pair-encoding tokenizer for BlenderbotSmall (lowercases input)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            # first line is a version header, last entry is an empty trailing line
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        """Apply BPE to one whitespace-delimited token; results are cached per word."""
        if token in self.cache:
            return self.cache[token]
        # separate punctuation and normalize whitespace / newlines
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            # mark the end-of-word symbol so merges can distinguish word-final chars
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                # greedily merge the lowest-ranked (most frequent) pair
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))

                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]  # drop the trailing "</w>" marker

            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        """Split text into BPE sub-tokens."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token to an id; falls back to the unk token's id."""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an id to its token string; falls back to the unk token."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Rejoin sub-tokens, undoing the "@@ " continuation markers."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json and merges.txt into ``save_directory``.

        Returns the pair of file paths, or ``None`` if the directory is invalid.
        """
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
75
0
'''simple docstring''' import torch from diffusers import DiffusionPipeline class lowerCAmelCase ( lowerCamelCase__ ): def __init__( self : int , __lowercase : Optional[int] , __lowercase : Any ): """simple docstring""" super().__init__() self.register_modules(unet=__lowercase , scheduler=__lowercase ) def __call__( self : int ): """simple docstring""" __lowercase =torch.randn( (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , ) __lowercase =1 __lowercase =self.unet(__lowercase , __lowercase ).sample __lowercase =self.scheduler.step(__lowercase , __lowercase , __lowercase ).prev_sample __lowercase =scheduler_output - scheduler_output + torch.ones_like(__lowercase ) return result
141
'''simple docstring''' from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging a_ : Dict = logging.get_logger(__name__) a_ : Any = { """snap-research/efficientformer-l1-300""": ( """https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json""" ), } class __UpperCamelCase ( lowerCamelCase__ ): lowercase : List[str] ='efficientformer' def __init__( self, lowerCAmelCase = [3, 2, 6, 4], lowerCAmelCase = [48, 96, 224, 448], lowerCAmelCase = [True, True, True, True], lowerCAmelCase = 448, lowerCAmelCase = 32, lowerCAmelCase = 4, lowerCAmelCase = 7, lowerCAmelCase = 5, lowerCAmelCase = 8, lowerCAmelCase = 4, lowerCAmelCase = 0.0, lowerCAmelCase = 16, lowerCAmelCase = 3, lowerCAmelCase = 3, lowerCAmelCase = 3, lowerCAmelCase = 2, lowerCAmelCase = 1, lowerCAmelCase = 0.0, lowerCAmelCase = 1, lowerCAmelCase = True, lowerCAmelCase = True, lowerCAmelCase = 1e-5, lowerCAmelCase = "gelu", lowerCAmelCase = 0.0_2, lowerCAmelCase = 1e-12, lowerCAmelCase = 224, lowerCAmelCase = 1e-05, **lowerCAmelCase, ): """simple docstring""" super().__init__(**lowerCAmelCase ) lowerCamelCase_ =hidden_act lowerCamelCase_ =hidden_dropout_prob lowerCamelCase_ =hidden_sizes lowerCamelCase_ =num_hidden_layers lowerCamelCase_ =num_attention_heads lowerCamelCase_ =initializer_range lowerCamelCase_ =layer_norm_eps lowerCamelCase_ =patch_size lowerCamelCase_ =num_channels lowerCamelCase_ =depths lowerCamelCase_ =mlp_expansion_ratio lowerCamelCase_ =downsamples lowerCamelCase_ =dim lowerCamelCase_ =key_dim lowerCamelCase_ =attention_ratio lowerCamelCase_ =resolution lowerCamelCase_ =pool_size lowerCamelCase_ =downsample_patch_size lowerCamelCase_ =downsample_stride lowerCamelCase_ =downsample_pad lowerCamelCase_ =drop_path_rate lowerCamelCase_ =num_metaad_blocks lowerCamelCase_ =distillation lowerCamelCase_ =use_layer_scale lowerCamelCase_ =layer_scale_init_value lowerCamelCase_ =image_size lowerCamelCase_ =batch_norm_eps
75
0
"""RAG tokenizer: wraps a question-encoder tokenizer and a generator tokenizer.

Reconstructed from obfuscated source: method signatures declared the same
parameter name multiple times (a SyntaxError) and `warnings.warn` was
passed a parameter where the warning category belongs.
"""
import os
import warnings
from typing import List, Optional

from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig

logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__(self, question_encoder, generator):
        """Store both tokenizers; the question encoder is active by default."""
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        """Save both sub-tokenizers under fixed sub-folders of ``save_directory``."""
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load both sub-tokenizers from their dedicated sub-folders."""
        # dynamic import to avoid a circular dependency with the auto mapping
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)

        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        """Delegate to whichever tokenizer is currently active."""
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """Deprecated helper that tokenizes sources and (optionally) targets.

        Kept for backward compatibility; emits a FutureWarning.
        """
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
52
"""Tests for the Speech2Text feature extractor.

Reconstructed from obfuscated source: __init__ and several test methods
declared duplicate parameter names (a SyntaxError); also replaces the
deprecated ``assertEquals`` alias and drops an unused nested helper.
"""
import itertools
import random
import unittest

import numpy as np

from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin

if is_speech_available():
    from transformers import Speech2TextFeatureExtractor

global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random (batch, length) list-of-lists of floats in [0, scale)."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    """Builds feature-extractor kwargs and synthetic speech inputs for the tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # step between consecutive input lengths so the batch covers the range
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Return a batch of synthetic inputs, equal-length or increasing in size."""
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        """Assert per-feature mean ≈ 0 and variance ≈ 1 (CMVN applied)."""
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            # padded region must stay (near) zero
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="max_length",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])

    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=16,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 6, 24))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1e-4))
75
0
"""Time Series Transformer model configuration.

Reconstructed from obfuscated source: all __init__ parameters shared one
name (a SyntaxError); names were restored by matching defaults and the
attribute-assignment order against the stored attributes.
"""
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class TimeSeriesTransformerConfig(PretrainedConfig):
    """Configuration for an encoder-decoder transformer over time-series data."""

    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        """Number of extra per-timestep features fed to the transformer."""
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
207
"""Simplified DES (S-DES) over bit-strings: key schedule, encrypt, decrypt.

All values are strings of '0'/'1' characters; permutation tables are
1-indexed, as is conventional in DES descriptions.
"""


def apply_table(inp, table):
    """Permute/select characters of `inp` according to a 1-indexed `table`."""
    res = ""
    for i in table:
        res += inp[i - 1]  # tables are 1-indexed
    return res


def left_shift(data):
    """Circular left shift of a bit-string by one position."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit-strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up a 4-bit string in S-box `s`: outer bits → row, middle bits → column.

    Returns the un-padded binary representation of the table entry.
    """
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of S-DES on an 8-bit `message` with a round `key`.

    NOTE(review): relies on the module-global `p4_table` defined in the
    __main__ block below, so it is only callable after that block runs.
    """
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    # pad each S-box output back to 2 bits
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation: P10, split, shift, P8 (key1); two more shifts, P8 (key2)
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption: IP, round(key1), swap halves, round(key2), IP^-1
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption: same structure with the round keys in reverse order
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decypting is:", PT)
75
0
# Public namespace for the Stable Diffusion pipeline family.  Each group of
# heavy optional dependencies (torch/transformers, flax, onnx, k-diffusion) is
# probed at import time; when a group is unavailable, dummy placeholder objects
# are re-exported instead so `from ... import X` never hard-fails.
#
# NOTE(review): the two output dataclasses below carry obfuscated names
# (`__lowercase`) and placeholder field values (`42`) inherited from the
# original text — confirm the intended names/annotations against upstream.
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import PIL
from PIL import Image

from ...utils import (
    BaseOutput,
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_k_diffusion_available,
    is_k_diffusion_version,
    is_onnx_available,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


@dataclass
class __lowercase(lowerCamelCase__):
    _UpperCamelCase = 42
    _UpperCamelCase = 42


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_cycle_diffusion import CycleDiffusionPipeline
    from .pipeline_stable_diffusion import StableDiffusionPipeline
    from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
    from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
    from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
    from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
    from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
    from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
    from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
    from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
    from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
    from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
    from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
    from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
    from .pipeline_stable_unclip import StableUnCLIPPipeline
    from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
    from .safety_checker import StableDiffusionSafetyChecker
    from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer

# Image-variation pipeline additionally needs transformers >= 4.25.0.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
    from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline

# These three pipelines additionally need transformers >= 4.26.0.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        StableDiffusionDepthaImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionPixaPixZeroPipeline,
    )
else:
    from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
    from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
    from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline

# k-diffusion sampler wrapper needs the external k-diffusion package >= 0.0.12.
try:
    if not (
        is_torch_available()
        and is_transformers_available()
        and is_k_diffusion_available()
        and is_k_diffusion_version(">=", "0.0.12")
    ):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import *  # noqa F403
else:
    from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline

# ONNX Runtime exports.
try:
    if not (is_transformers_available() and is_onnx_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_onnx_objects import *  # noqa F403
else:
    from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
    from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
    from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
    from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline

# Flax exports (no dummy fallback: simply absent when flax is missing).
if is_transformers_available() and is_flax_available():
    import flax

    @flax.struct.dataclass
    class __lowercase(lowerCamelCase__):
        _UpperCamelCase = 42
        _UpperCamelCase = 42

    from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
    from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
    from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
    from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
275
"""AutoFeatureExtractor: resolve and instantiate the right feature extractor for a model.

Fix: the mangled original defined both module helpers as `a_` and the class as
`__UpperCamelCase`, while internal call sites and error messages reference
`feature_extractor_class_from_name` and `pretrained_model_name_or_path`, and the
`from_pretrained` decorator argument (`lowerCAmelCase`) was an undefined name at
class-creation time.  Names are restored so the module is internally consistent.
"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union

# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
    CONFIG_MAPPING_NAMES,
    AutoConfig,
    model_type_to_module_name,
    replace_list_option_in_docstrings,
)

logger = logging.get_logger(__name__)

# model_type -> feature extractor class name
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
    [
        ("audio-spectrogram-transformer", "ASTFeatureExtractor"),
        ("beit", "BeitFeatureExtractor"),
        ("chinese_clip", "ChineseCLIPFeatureExtractor"),
        ("clap", "ClapFeatureExtractor"),
        ("clip", "CLIPFeatureExtractor"),
        ("clipseg", "ViTFeatureExtractor"),
        ("conditional_detr", "ConditionalDetrFeatureExtractor"),
        ("convnext", "ConvNextFeatureExtractor"),
        ("cvt", "ConvNextFeatureExtractor"),
        ("data2vec-audio", "Wav2Vec2FeatureExtractor"),
        ("data2vec-vision", "BeitFeatureExtractor"),
        ("deformable_detr", "DeformableDetrFeatureExtractor"),
        ("deit", "DeiTFeatureExtractor"),
        ("detr", "DetrFeatureExtractor"),
        ("dinat", "ViTFeatureExtractor"),
        ("donut-swin", "DonutFeatureExtractor"),
        ("dpt", "DPTFeatureExtractor"),
        ("encodec", "EncodecFeatureExtractor"),
        ("flava", "FlavaFeatureExtractor"),
        ("glpn", "GLPNFeatureExtractor"),
        ("groupvit", "CLIPFeatureExtractor"),
        ("hubert", "Wav2Vec2FeatureExtractor"),
        ("imagegpt", "ImageGPTFeatureExtractor"),
        ("layoutlmv2", "LayoutLMv2FeatureExtractor"),
        ("layoutlmv3", "LayoutLMv3FeatureExtractor"),
        ("levit", "LevitFeatureExtractor"),
        ("maskformer", "MaskFormerFeatureExtractor"),
        ("mctct", "MCTCTFeatureExtractor"),
        ("mobilenet_v1", "MobileNetV1FeatureExtractor"),
        ("mobilenet_v2", "MobileNetV2FeatureExtractor"),
        ("mobilevit", "MobileViTFeatureExtractor"),
        ("nat", "ViTFeatureExtractor"),
        ("owlvit", "OwlViTFeatureExtractor"),
        ("perceiver", "PerceiverFeatureExtractor"),
        ("poolformer", "PoolFormerFeatureExtractor"),
        ("regnet", "ConvNextFeatureExtractor"),
        ("resnet", "ConvNextFeatureExtractor"),
        ("segformer", "SegformerFeatureExtractor"),
        ("sew", "Wav2Vec2FeatureExtractor"),
        ("sew-d", "Wav2Vec2FeatureExtractor"),
        ("speech_to_text", "Speech2TextFeatureExtractor"),
        ("speecht5", "SpeechT5FeatureExtractor"),
        ("swiftformer", "ViTFeatureExtractor"),
        ("swin", "ViTFeatureExtractor"),
        ("swinv2", "ViTFeatureExtractor"),
        ("table-transformer", "DetrFeatureExtractor"),
        ("timesformer", "VideoMAEFeatureExtractor"),
        ("tvlt", "TvltFeatureExtractor"),
        ("unispeech", "Wav2Vec2FeatureExtractor"),
        ("unispeech-sat", "Wav2Vec2FeatureExtractor"),
        ("van", "ConvNextFeatureExtractor"),
        ("videomae", "VideoMAEFeatureExtractor"),
        ("vilt", "ViltFeatureExtractor"),
        ("vit", "ViTFeatureExtractor"),
        ("vit_mae", "ViTFeatureExtractor"),
        ("vit_msn", "ViTFeatureExtractor"),
        ("wav2vec2", "Wav2Vec2FeatureExtractor"),
        ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
        ("wavlm", "Wav2Vec2FeatureExtractor"),
        ("whisper", "WhisperFeatureExtractor"),
        ("xclip", "CLIPFeatureExtractor"),
        ("yolos", "YolosFeatureExtractor"),
    ]
)

FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)


def feature_extractor_class_from_name(class_name: str):
    """Return the feature extractor class named `class_name`, or None if not found."""
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None


def get_feature_extractor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Load the feature extractor config dict from a repo/path; {} when absent."""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)


class AutoFeatureExtractor:
    """Factory class: `from_pretrained` returns the correct concrete feature extractor."""

    def __init__(self):
        # Not instantiable: this class only exposes classmethods/staticmethods.
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate the feature extractor class matching the checkpoint's config."""
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new (config class -> feature extractor class) pair."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
75
0
"""ONNX Runtime latency micro-benchmark: warm up, then time 2000 runs of model.onnx.

Fix: the mangled original used `np.intaa`, which does not exist on numpy
(AttributeError before inference could start) — restored to `np.int64`, the
integer dtype these all-ones id/mask inputs were clearly meant to carry.
"""
import os
import time

import numpy as np
import onnxruntime as ort

# NOTE(review): the original names of these three flags were lost in the
# source; they are assigned but never read below.  They resemble ONNX
# Runtime / TensorRT "1"/"0" environment toggles — confirm against the
# original script before relying on them.
flag_a = "1"
flag_b = "0"
flag_c = "1"

sess_opt = ort.SessionOptions()
# Disable graph optimizations so the measured latency is of the raw graph.
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

sequence = 128
batch = 1
# Fixed all-ones inputs are sufficient for latency measurement.
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)

print("Warm up phase...")
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

print("Start inference...")
start_time = time.time()
max_iters = 2000
predict = {}
for _ in range(max_iters):  # `_`: was `iter`, which shadowed the builtin
    predict = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )

print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1000 / max_iters))
340
"""Fine-tune the OpenAI GPT double-heads model on the RocStories cloze task.

Fix: all module functions in the mangled original were named `a_` (each
shadowing the previous), while `main` calls them as `accuracy`,
`load_rocstories_dataset` and `pre_process_datasets`, and the `__main__` guard
calls `main()` — the script crashed with NameError.  Also `np.intaa` (a
digit-mangled `np.int64`) is restored.
"""
import argparse
import csv
import logging
import os
import random

import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange

from transformers import (
    CONFIG_NAME,
    WEIGHTS_NAME,
    AdamW,
    OpenAIGPTDoubleHeadsModel,
    OpenAIGPTTokenizer,
    get_linear_schedule_with_warmup,
)

logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def accuracy(out, labels):
    """Count how many argmax predictions in `out` match `labels`."""
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Load a RocStories CSV into (story, continuation1, continuation2, label) tuples."""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output


def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pack encoded examples into fixed-size tensors.

    Each example becomes two candidate sequences
    ``[start] story [delim] continuation [clf]`` (one per continuation),
    zero-padded to `input_len`; lm_labels use -100 on padding so those
    positions are ignored by the loss.
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1  # position of the [clf] token
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets


def main():
    """Parse args, fine-tune (optional), save, and evaluate (optional)."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help=("If > 0: set total number of training steps to perform. Override num_train_epochs."),
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Recursively tokenize strings; pass ints through; map over containers."""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))


if __name__ == "__main__":
    main()
75
0
'''simple docstring''' import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class UpperCAmelCase ( unittest.TestCase ): def lowercase__ ( self : Optional[Any] ) -> str: _lowerCAmelCase = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split() _lowerCAmelCase = dict(zip(__snake_case , range(len(__snake_case ) ) ) ) _lowerCAmelCase = { """unk_token""": """<unk>""", """bos_token""": """<s>""", """eos_token""": """</s>""", } _lowerCAmelCase = { """feature_size""": 1, """padding_value""": 0.0, """sampling_rate""": 1_60_00, """return_attention_mask""": False, """do_normalize""": True, } _lowerCAmelCase = tempfile.mkdtemp() _lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) _lowerCAmelCase = os.path.join(self.tmpdirname , __snake_case ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(__snake_case ) + """\n""" ) with open(self.feature_extraction_file , """w""" , 
encoding="""utf-8""" ) as fp: fp.write(json.dumps(__snake_case ) + """\n""" ) # load decoder from hub _lowerCAmelCase = """hf-internal-testing/ngram-beam-search-decoder""" def lowercase__ ( self : Union[str, Any] , **__snake_case : List[Any] ) -> Tuple: _lowerCAmelCase = self.add_kwargs_tokens_map.copy() kwargs.update(__snake_case ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__snake_case ) def lowercase__ ( self : Union[str, Any] , **__snake_case : str ) -> List[str]: return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__snake_case ) def lowercase__ ( self : int , **__snake_case : str ) -> Tuple: return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__snake_case ) def lowercase__ ( self : Any ) -> str: shutil.rmtree(self.tmpdirname ) def lowercase__ ( self : List[Any] ) -> Union[str, Any]: _lowerCAmelCase = self.get_tokenizer() _lowerCAmelCase = self.get_feature_extractor() _lowerCAmelCase = self.get_decoder() _lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case ) processor.save_pretrained(self.tmpdirname ) _lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , __snake_case ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , __snake_case ) # decoder self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , ) self.assertIsInstance(processor.decoder , __snake_case ) def lowercase__ ( self : Optional[int] ) -> List[str]: _lowerCAmelCase = WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer() , 
feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match _lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha , 5.0 ) self.assertEqual(processor.language_model.beta , 3.0 ) self.assertEqual(processor.language_model.score_boundary , -7.0 ) self.assertEqual(processor.language_model.unk_score_offset , 3 ) def lowercase__ ( self : Optional[int] ) -> Tuple: _lowerCAmelCase = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(["""xx"""] ) with self.assertRaisesRegex(__snake_case , """include""" ): WavaVecaProcessorWithLM( tokenizer=__snake_case , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) def lowercase__ ( self : List[Any] ) -> Optional[Any]: _lowerCAmelCase = self.get_feature_extractor() _lowerCAmelCase = self.get_tokenizer() _lowerCAmelCase = self.get_decoder() _lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case ) _lowerCAmelCase = floats_list((3, 10_00) ) _lowerCAmelCase = feature_extractor(__snake_case , return_tensors="""np""" ) _lowerCAmelCase = processor(__snake_case , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def lowercase__ ( self : List[str] ) -> int: _lowerCAmelCase = self.get_feature_extractor() _lowerCAmelCase = self.get_tokenizer() _lowerCAmelCase = self.get_decoder() _lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case ) _lowerCAmelCase = """This is a test string""" _lowerCAmelCase = processor(text=__snake_case ) _lowerCAmelCase = tokenizer(__snake_case ) for key in 
encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowercase__ ( self : str , __snake_case : int=(2, 10, 16) , __snake_case : Optional[int]=77 ) -> Optional[Any]: np.random.seed(__snake_case ) return np.random.rand(*__snake_case ) def lowercase__ ( self : Optional[Any] ) -> List[Any]: _lowerCAmelCase = self.get_feature_extractor() _lowerCAmelCase = self.get_tokenizer() _lowerCAmelCase = self.get_decoder() _lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case ) _lowerCAmelCase = self._get_dummy_logits(shape=(10, 16) , seed=13 ) _lowerCAmelCase = processor.decode(__snake_case ) _lowerCAmelCase = decoder.decode_beams(__snake_case )[0] self.assertEqual(decoded_decoder[0] , decoded_processor.text ) self.assertEqual("""</s> <s> </s>""" , decoded_processor.text ) self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score ) @parameterized.expand([[None], ["""fork"""], ["""spawn"""]] ) def lowercase__ ( self : Union[str, Any] , __snake_case : Union[str, Any] ) -> str: _lowerCAmelCase = self.get_feature_extractor() _lowerCAmelCase = self.get_tokenizer() _lowerCAmelCase = self.get_decoder() _lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case ) _lowerCAmelCase = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) 
if pool_context is None: _lowerCAmelCase = processor.batch_decode(__snake_case ) else: with get_context(__snake_case ).Pool() as pool: _lowerCAmelCase = processor.batch_decode(__snake_case , __snake_case ) _lowerCAmelCase = list(__snake_case ) with get_context("""fork""" ).Pool() as p: _lowerCAmelCase = decoder.decode_beams_batch(__snake_case , __snake_case ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(__snake_case , decoded_processor.text ) self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text ) self.assertListEqual(__snake_case , decoded_processor.logit_score ) self.assertListEqual(__snake_case , decoded_processor.lm_score ) def lowercase__ ( self : Optional[int] ) -> Any: _lowerCAmelCase = self.get_feature_extractor() _lowerCAmelCase = self.get_tokenizer() _lowerCAmelCase = self.get_decoder() _lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case ) _lowerCAmelCase = self._get_dummy_logits() _lowerCAmelCase = 15 _lowerCAmelCase = -20.0 _lowerCAmelCase = -4.0 _lowerCAmelCase = processor.batch_decode( __snake_case , beam_width=__snake_case , beam_prune_logp=__snake_case , token_min_logp=__snake_case , ) _lowerCAmelCase = decoded_processor_out.text _lowerCAmelCase = list(__snake_case ) with get_context("""fork""" ).Pool() as pool: _lowerCAmelCase = decoder.decode_beams_batch( __snake_case , __snake_case , beam_width=__snake_case , beam_prune_logp=__snake_case , token_min_logp=__snake_case , ) _lowerCAmelCase = [d[0][0] for d in decoded_decoder_out] _lowerCAmelCase = [d[0][2] for d in decoded_decoder_out] _lowerCAmelCase = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(__snake_case , __snake_case ) self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , __snake_case 
) self.assertTrue(np.array_equal(__snake_case , decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-20.0_54, -18.4_47] , __snake_case , atol=1E-3 ) ) self.assertTrue(np.array_equal(__snake_case , decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-15.5_54, -13.94_74] , __snake_case , atol=1E-3 ) ) def lowercase__ ( self : str ) -> List[str]: _lowerCAmelCase = self.get_feature_extractor() _lowerCAmelCase = self.get_tokenizer() _lowerCAmelCase = self.get_decoder() _lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case ) _lowerCAmelCase = self._get_dummy_logits() _lowerCAmelCase = 2.0 _lowerCAmelCase = 5.0 _lowerCAmelCase = -20.0 _lowerCAmelCase = True _lowerCAmelCase = processor.batch_decode( __snake_case , alpha=__snake_case , beta=__snake_case , unk_score_offset=__snake_case , lm_score_boundary=__snake_case , ) _lowerCAmelCase = decoded_processor_out.text _lowerCAmelCase = list(__snake_case ) decoder.reset_params( alpha=__snake_case , beta=__snake_case , unk_score_offset=__snake_case , lm_score_boundary=__snake_case , ) with get_context("""fork""" ).Pool() as pool: _lowerCAmelCase = decoder.decode_beams_batch( __snake_case , __snake_case , ) _lowerCAmelCase = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(__snake_case , __snake_case ) self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , __snake_case ) _lowerCAmelCase = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha , 2.0 ) self.assertEqual(lm_model.beta , 5.0 ) self.assertEqual(lm_model.unk_score_offset , -20.0 ) self.assertEqual(lm_model.score_boundary , __snake_case ) def lowercase__ ( self : Optional[int] ) -> str: _lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" ) _lowerCAmelCase = processor.decoder.model_container[processor.decoder._model_key] _lowerCAmelCase = 
Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute() _lowerCAmelCase = os.listdir(__snake_case ) _lowerCAmelCase = ["""alphabet.json""", """language_model"""] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) self.assertListEqual(__snake_case , __snake_case ) def lowercase__ ( self : Tuple ) -> Any: _lowerCAmelCase = snapshot_download("""hf-internal-testing/processor_with_lm""" ) _lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained(__snake_case ) _lowerCAmelCase = processor.decoder.model_container[processor.decoder._model_key] _lowerCAmelCase = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute() _lowerCAmelCase = os.listdir(__snake_case ) _lowerCAmelCase = os.listdir(__snake_case ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(__snake_case , __snake_case ) def lowercase__ ( self : Optional[int] ) -> Union[str, Any]: _lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" ) _lowerCAmelCase = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" ) _lowerCAmelCase = floats_list((3, 10_00) ) _lowerCAmelCase = processor_wavaveca(__snake_case , return_tensors="""np""" ) _lowerCAmelCase = processor_auto(__snake_case , return_tensors="""np""" ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 ) _lowerCAmelCase = self._get_dummy_logits() _lowerCAmelCase = processor_wavaveca.batch_decode(__snake_case ) _lowerCAmelCase = processor_auto.batch_decode(__snake_case ) self.assertListEqual(decoded_wavaveca.text , decoded_auto.text ) def lowercase__ ( self : Optional[int] ) -> 
Optional[Any]: _lowerCAmelCase = self.get_feature_extractor() _lowerCAmelCase = self.get_tokenizer() _lowerCAmelCase = self.get_decoder() _lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case ) self.assertListEqual( processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , ) @staticmethod def lowercase__ ( __snake_case : List[str] , __snake_case : Optional[Any] ) -> Optional[Any]: _lowerCAmelCase = [d[key] for d in offsets] return retrieved_list def lowercase__ ( self : int ) -> Optional[Any]: _lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" ) _lowerCAmelCase = self._get_dummy_logits()[0] _lowerCAmelCase = processor.decode(__snake_case , output_word_offsets=__snake_case ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue("""text""" in outputs ) self.assertTrue("""word_offsets""" in outputs ) self.assertTrue(isinstance(__snake_case , __snake_case ) ) self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] ) def lowercase__ ( self : Any ) -> Optional[Any]: _lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" ) _lowerCAmelCase = self._get_dummy_logits() _lowerCAmelCase = processor.batch_decode(__snake_case , output_word_offsets=__snake_case ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue("""text""" in 
outputs ) self.assertTrue("""word_offsets""" in outputs ) self.assertTrue(isinstance(__snake_case , __snake_case ) ) self.assertListEqual( [""" """.join(self.get_from_offsets(__snake_case , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] ) @slow @require_torch @require_torchaudio def lowercase__ ( self : str ) -> Dict: import torch _lowerCAmelCase = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=__snake_case ) _lowerCAmelCase = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=1_60_00 ) ) _lowerCAmelCase = iter(__snake_case ) _lowerCAmelCase = next(__snake_case ) _lowerCAmelCase = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" ) _lowerCAmelCase = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train _lowerCAmelCase = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values with torch.no_grad(): _lowerCAmelCase = model(__snake_case ).logits.cpu().numpy() _lowerCAmelCase = processor.decode(logits[0] , output_word_offsets=__snake_case ) _lowerCAmelCase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate _lowerCAmelCase = [ { """start_time""": d["""start_offset"""] * time_offset, """end_time""": d["""end_offset"""] * time_offset, """word""": d["""word"""], } for d in output["""word_offsets"""] ] _lowerCAmelCase = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL""" # output words 
self.assertEqual(""" """.join(self.get_from_offsets(__snake_case , """word""" ) ) , __snake_case ) self.assertEqual(""" """.join(self.get_from_offsets(__snake_case , """word""" ) ) , output.text ) # output times _lowerCAmelCase = torch.tensor(self.get_from_offsets(__snake_case , """start_time""" ) ) _lowerCAmelCase = torch.tensor(self.get_from_offsets(__snake_case , """end_time""" ) ) # fmt: off _lowerCAmelCase = torch.tensor([1.41_99, 1.65_99, 2.25_99, 3.0, 3.24, 3.59_99, 3.79_99, 4.09_99, 4.26, 4.94, 5.28, 5.65_99, 5.78, 5.94, 6.32, 6.53_99, 6.65_99] ) _lowerCAmelCase = torch.tensor([1.53_99, 1.89_99, 2.9, 3.16, 3.53_99, 3.72, 4.01_99, 4.17_99, 4.76, 5.15_99, 5.55_99, 5.69_99, 5.86, 6.19_99, 6.38, 6.61_99, 6.94] ) # fmt: on self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=0.01 ) ) self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=0.01 ) )
70
"""simple docstring"""
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    """Histogram-based contrast stretching of a grayscale image.

    ``stretch`` reads an image, builds its intensity histogram, computes a
    cumulative remapping table (``last_list``) and rewrites every pixel
    through that table, saving the result to ``output_data/output.jpg``.
    """

    def __init__(self):
        # loaded grayscale image (numpy array once ``stretch`` runs)
        self.img = ""
        # untouched copy of the input, kept for side-by-side display
        self.original_image = ""
        # remapping table: last_list[old_intensity] -> new_intensity
        self.last_list = []
        # rounding remainder carried between histogram bins
        self.rem = 0
        # number of gray levels
        self.L = 256
        # total pixel count (histogram mass)
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        """Apply the stretch to the image at ``input_image`` (path) and write the result.

        :param input_image: filesystem path of the image to process (read as grayscale).
        """
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        # x holds the per-bin counts; the bin edges / patches are unused
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            # probability of intensity i, accumulated into the CDF self.sk
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            # NOTE(review): self.rem starts at 0 and is only assigned inside this
            # guard, so the branch is dead and `last % last` (always 0 for
            # nonzero last) looks suspicious — preserved from the original,
            # TODO confirm intended rounding behavior.
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        # remap every pixel through the lookup table built above
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        """Accumulate the stretched image's histogram into the current pyplot figure."""
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        """Display input and output images in OpenCV windows for 5 seconds."""
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5_000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    # NOTE(review): basename(__file__) as a directory component looks odd
    # (dirname would be conventional) but is preserved from the original.
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
75
0
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    """Deprecated alias of :class:`BeitImageProcessor`.

    Kept only for backward compatibility; instantiating it emits a
    ``FutureWarning`` and otherwise behaves exactly like the image processor.
    """

    def __init__(self, *args, **kwargs) -> None:
        # The original passed the positional-args tuple as the warning
        # category; the correct deprecation category is FutureWarning.
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
76
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_detr_config(model_name):
    """Build a :class:`DetrConfig` (and panoptic flag) for the given checkpoint name.

    :param model_name: e.g. ``"detr-resnet-50"``; must mention resnet-50 or resnet-101.
    :returns: ``(config, is_panoptic)``
    :raises ValueError: if the backbone cannot be inferred from the name.
    """
    # initialize config with the matching ResNet backbone
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    # NOTE(review): only the detection label file is referenced in the source;
    # panoptic checkpoints may need coco-panoptic-id2label.json — confirm.
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config, is_panoptic


def create_rename_keys(config):
    """List all state-dict keys to rename as ``(original_name, our_name)`` pairs."""
    rename_keys = []

    # stem
    rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight"))
    rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight"))
    rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias"))
    rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean"))
    rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var"))

    # stages
    bn_params = ("weight", "bias", "running_mean", "running_var")
    for stage_idx in range(len(config.backbone_config.depths)):
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            src_base = f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}"
            dest_base = f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}"
            # shortcut (only the first block of a stage downsamples)
            if layer_idx == 0:
                rename_keys.append((f"{src_base}.downsample.0.weight", f"{dest_base}.shortcut.convolution.weight"))
                for param in bn_params:
                    rename_keys.append((f"{src_base}.downsample.1.{param}", f"{dest_base}.shortcut.normalization.{param}"))
            # 3 convs per bottleneck block
            for i in range(3):
                rename_keys.append((f"{src_base}.conv{i + 1}.weight", f"{dest_base}.layer.{i}.convolution.weight"))
                for param in bn_params:
                    rename_keys.append((f"{src_base}.bn{i + 1}.{param}", f"{dest_base}.layer.{i}.normalization.{param}"))

    for i in range(config.encoder_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        for old, new in (
            ("self_attn.out_proj", "self_attn.out_proj"),
            ("linear1", "fc1"),
            ("linear2", "fc2"),
            ("norm1", "self_attn_layer_norm"),
            ("norm2", "final_layer_norm"),
        ):
            for param in ("weight", "bias"):
                rename_keys.append((f"transformer.encoder.layers.{i}.{old}.{param}", f"encoder.layers.{i}.{new}.{param}"))
        # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
        for old, new in (
            ("self_attn.out_proj", "self_attn.out_proj"),
            ("multihead_attn.out_proj", "encoder_attn.out_proj"),
            ("linear1", "fc1"),
            ("linear2", "fc2"),
            ("norm1", "self_attn_layer_norm"),
            ("norm2", "encoder_attn_layer_norm"),
            ("norm3", "final_layer_norm"),
        ):
            for param in ("weight", "bias"):
                rename_keys.append((f"transformer.decoder.layers.{i}.{old}.{param}", f"decoder.layers.{i}.{new}.{param}"))

    # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
    rename_keys.extend(
        [
            ("input_proj.weight", "input_projection.weight"),
            ("input_proj.bias", "input_projection.bias"),
            ("query_embed.weight", "query_position_embeddings.weight"),
            ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
            ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
            ("class_embed.weight", "class_labels_classifier.weight"),
            ("class_embed.bias", "class_labels_classifier.bias"),
            ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
            ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
            ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
            ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
            ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
            ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
        ]
    )
    return rename_keys


def rename_key(state_dict, old, new):
    """Move ``state_dict[old]`` to ``state_dict[new]`` in place."""
    val = state_dict.pop(old)
    state_dict[new] = val


def read_in_q_k_v(state_dict, is_panoptic=False):
    """Split fused attention in_proj weights/biases into separate q/k/v projections.

    PyTorch's MultiHeadAttention stores q, k and v as one stacked matrix of
    shape (3*256, 256); the HF model expects them as three 256-row blocks.
    Mutates ``state_dict`` in place.
    """
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # NOTE(review): the destination key names below are reconstructed from the
    # upstream conversion script (the obfuscated source erased them).
    # first: transformer encoder
    for i in range(6):
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]


def prepare_img():
    """Download the standard COCO cats test image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert an original facebookresearch/detr checkpoint to the HF format.

    :param model_name: one of ``detr-resnet-50`` / ``detr-resnet-101``.
    :param pytorch_dump_folder_path: where to save the converted model, or None.
    :param push_to_hub: whether to upload model + processor to the hub.
    """
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    # NOTE(review): destination keys reconstructed from the upstream conversion script.
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val

    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    image_format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=image_format)
    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="detr-resnet-50",
        type=str,
        choices=["detr-resnet-50", "detr-resnet-101"],
        help="Name of the DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
    args = parser.parse_args()
    convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
76
1
import os


def solution(filename=None):
    """Return the total of all name scores (Project Euler problem 22).

    Each name's score is its 1-based alphabetical rank multiplied by the sum
    of its letter values (A=1 .. Z=26).

    :param filename: path to a file of comma-separated, double-quoted names;
        defaults to ``p022_names.txt`` next to this module.
    :returns: the grand total score as an int.
    """
    if filename is None:
        filename = os.path.join(os.path.dirname(__file__), "p022_names.txt")
    with open(filename) as file:
        names = str(file.readlines()[0])
    names = names.replace('"', "").split(",")
    names.sort()

    total_score = 0
    for i, name in enumerate(names):
        # ord("A") == 65, so subtracting 64 maps A -> 1, ..., Z -> 26
        name_score = sum(ord(letter) - 64 for letter in name)
        total_score += (i + 1) * name_score
    return total_score


if __name__ == "__main__":
    print(solution())
76
import os


def solution(filename=None):
    """Return the total of all name scores (Project Euler problem 22).

    Each name's score is its 1-based alphabetical rank multiplied by the sum
    of its letter values (A=1 .. Z=26).

    :param filename: path to a file of comma-separated, double-quoted names;
        defaults to ``p022_names.txt`` next to this module.
    :returns: the grand total score as an int.
    """
    if filename is None:
        filename = os.path.join(os.path.dirname(__file__), "p022_names.txt")
    with open(filename) as file:
        names = str(file.readlines()[0])
    names = names.replace('"', "").split(",")
    names.sort()

    total_score = 0
    for i, name in enumerate(names):
        # ord("A") == 65, so subtracting 64 maps A -> 1, ..., Z -> 26
        name_score = sum(ord(letter) - 64 for letter in name)
        total_score += (i + 1) * name_score
    return total_score


if __name__ == "__main__":
    print(solution())
76
1
import argparse import os import shutil from pathlib import Path import onnx import torch from packaging import version from torch.onnx import export from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline a_ = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11') def lowerCamelCase__ ( _a , _a , _a , _a , _a , _a , _a , _a=False , ): output_path.parent.mkdir(parents=_a , exist_ok=_a) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( _a , _a , f=output_path.as_posix() , input_names=_a , output_names=_a , dynamic_axes=_a , do_constant_folding=_a , use_external_data_format=_a , enable_onnx_checker=_a , opset_version=_a , ) else: export( _a , _a , f=output_path.as_posix() , input_names=_a , output_names=_a , dynamic_axes=_a , do_constant_folding=_a , opset_version=_a , ) @torch.no_grad() def lowerCamelCase__ ( _a , _a , _a , _a = False): SCREAMING_SNAKE_CASE : str = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): SCREAMING_SNAKE_CASE : Dict = "cuda" elif fpaa and not torch.cuda.is_available(): raise ValueError("`float16` model export is only supported on GPUs with CUDA") else: SCREAMING_SNAKE_CASE : Tuple = "cpu" SCREAMING_SNAKE_CASE : Any = StableDiffusionPipeline.from_pretrained(_a , torch_dtype=_a).to(_a) SCREAMING_SNAKE_CASE : List[Any] = Path(_a) # TEXT ENCODER SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline.text_encoder.config.max_position_embeddings SCREAMING_SNAKE_CASE : Tuple = pipeline.text_encoder.config.hidden_size SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline.tokenizer( "A sample prompt" , padding="max_length" , max_length=pipeline.tokenizer.model_max_length , truncation=_a , return_tensors="pt" , ) onnx_export( pipeline.text_encoder , model_args=(text_input.input_ids.to(device=_a , dtype=torch.intaa)) , 
output_path=output_path / "text_encoder" / "model.onnx" , ordered_input_names=["input_ids"] , output_names=["last_hidden_state", "pooler_output"] , dynamic_axes={ "input_ids": {0: "batch", 1: "sequence"}, } , opset=_a , ) del pipeline.text_encoder # UNET SCREAMING_SNAKE_CASE : Dict = pipeline.unet.config.in_channels SCREAMING_SNAKE_CASE : str = pipeline.unet.config.sample_size SCREAMING_SNAKE_CASE : List[str] = output_path / "unet" / "model.onnx" onnx_export( pipeline.unet , model_args=( torch.randn(2 , _a , _a , _a).to(device=_a , dtype=_a), torch.randn(2).to(device=_a , dtype=_a), torch.randn(2 , _a , _a).to(device=_a , dtype=_a), False, ) , output_path=_a , ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"] , output_names=["out_sample"] , dynamic_axes={ "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"}, "timestep": {0: "batch"}, "encoder_hidden_states": {0: "batch", 1: "sequence"}, } , opset=_a , use_external_data_format=_a , ) SCREAMING_SNAKE_CASE : List[Any] = str(unet_path.absolute().as_posix()) SCREAMING_SNAKE_CASE : Tuple = os.path.dirname(_a) SCREAMING_SNAKE_CASE : List[Any] = onnx.load(_a) # clean up existing tensor files shutil.rmtree(_a) os.mkdir(_a) # collate external tensor files into one onnx.save_model( _a , _a , save_as_external_data=_a , all_tensors_to_one_file=_a , location="weights.pb" , convert_attribute=_a , ) del pipeline.unet # VAE ENCODER SCREAMING_SNAKE_CASE : Any = pipeline.vae SCREAMING_SNAKE_CASE : Tuple = vae_encoder.config.in_channels SCREAMING_SNAKE_CASE : Optional[Any] = vae_encoder.config.sample_size # need to get the raw tensor output (sample) from the encoder SCREAMING_SNAKE_CASE : str = lambda _a , _a: vae_encoder.encode(_a , _a)[0].sample() onnx_export( _a , model_args=( torch.randn(1 , _a , _a , _a).to(device=_a , dtype=_a), False, ) , output_path=output_path / "vae_encoder" / "model.onnx" , ordered_input_names=["sample", "return_dict"] , output_names=["latent_sample"] , 
dynamic_axes={ "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"}, } , opset=_a , ) # VAE DECODER SCREAMING_SNAKE_CASE : str = pipeline.vae SCREAMING_SNAKE_CASE : Tuple = vae_decoder.config.latent_channels SCREAMING_SNAKE_CASE : str = vae_decoder.config.out_channels # forward only through the decoder part SCREAMING_SNAKE_CASE : List[Any] = vae_encoder.decode onnx_export( _a , model_args=( torch.randn(1 , _a , _a , _a).to(device=_a , dtype=_a), False, ) , output_path=output_path / "vae_decoder" / "model.onnx" , ordered_input_names=["latent_sample", "return_dict"] , output_names=["sample"] , dynamic_axes={ "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"}, } , opset=_a , ) del pipeline.vae # SAFETY CHECKER if pipeline.safety_checker is not None: SCREAMING_SNAKE_CASE : int = pipeline.safety_checker SCREAMING_SNAKE_CASE : List[str] = safety_checker.config.vision_config.num_channels SCREAMING_SNAKE_CASE : List[Any] = safety_checker.config.vision_config.image_size SCREAMING_SNAKE_CASE : List[str] = safety_checker.forward_onnx onnx_export( pipeline.safety_checker , model_args=( torch.randn( 1 , _a , _a , _a , ).to(device=_a , dtype=_a), torch.randn(1 , _a , _a , _a).to(device=_a , dtype=_a), ) , output_path=output_path / "safety_checker" / "model.onnx" , ordered_input_names=["clip_input", "images"] , output_names=["out_images", "has_nsfw_concepts"] , dynamic_axes={ "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"}, "images": {0: "batch", 1: "height", 2: "width", 3: "channels"}, } , opset=_a , ) del pipeline.safety_checker SCREAMING_SNAKE_CASE : Optional[int] = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker") SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline.feature_extractor else: SCREAMING_SNAKE_CASE : Tuple = None SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : Any = OnnxStableDiffusionPipeline( vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder") , 
vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder") , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder") , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / "unet") , scheduler=pipeline.scheduler , safety_checker=_a , feature_extractor=_a , requires_safety_checker=safety_checker is not None , ) onnx_pipeline.save_pretrained(_a) print("ONNX pipeline saved to" , _a) del pipeline del onnx_pipeline SCREAMING_SNAKE_CASE : Any = OnnxStableDiffusionPipeline.from_pretrained(_a , provider="CPUExecutionProvider") print("ONNX pipeline is loadable") if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument( '--model_path', type=str, required=True, help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).', ) parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.') parser.add_argument( '--opset', default=14, type=int, help='The version of the ONNX operator set to use.', ) parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode') a_ = parser.parse_args() convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
76
from collections.abc import Callable import numpy as np def lowerCamelCase__ ( _a , _a , _a , _a , _a): SCREAMING_SNAKE_CASE : Dict = int(np.ceil((x_end - xa) / step_size)) SCREAMING_SNAKE_CASE : Tuple = np.zeros((n + 1,)) SCREAMING_SNAKE_CASE : int = ya SCREAMING_SNAKE_CASE : int = xa for k in range(_a): SCREAMING_SNAKE_CASE : Any = y[k] + step_size * ode_func(_a , y[k]) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
76
1
from __future__ import annotations class _UpperCamelCase : '''simple docstring''' def __init__( self : Optional[Any] , a : int = 0 ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = key def __UpperCamelCase ( self : Optional[int] , a : str , a : int ) -> list[str]: """simple docstring""" assert isinstance(a , a ) and isinstance(a , a ) SCREAMING_SNAKE_CASE : Optional[int] = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(a ) ^ key ) for ch in content] def __UpperCamelCase ( self : List[Any] , a : str , a : int ) -> list[str]: """simple docstring""" assert isinstance(a , a ) and isinstance(a , a ) SCREAMING_SNAKE_CASE : Optional[int] = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(a ) ^ key ) for ch in content] def __UpperCamelCase ( self : Optional[Any] , a : str , a : int = 0 ) -> str: """simple docstring""" assert isinstance(a , a ) and isinstance(a , a ) SCREAMING_SNAKE_CASE : Dict = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned SCREAMING_SNAKE_CASE : Optional[Any] = "" for ch in content: ans += chr(ord(a ) ^ key ) return ans def __UpperCamelCase ( self : int , a : str , a : int = 0 ) -> str: """simple docstring""" assert isinstance(a , a ) and isinstance(a , a ) SCREAMING_SNAKE_CASE : str = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned SCREAMING_SNAKE_CASE : int = "" for ch in content: ans += chr(ord(a ) ^ key ) return ans def __UpperCamelCase ( self : str , a : str , a : int = 0 ) -> bool: """simple docstring""" assert isinstance(a , a ) and isinstance(a , a ) try: with open(a ) as fin, open("encrypt.out" , "w+" ) as fout: # actual encrypt-process for line in fin: fout.write(self.encrypt_string(a , a ) ) except OSError: return False return True def __UpperCamelCase ( self : Optional[int] , a : str , a : int ) -> bool: """simple 
docstring""" assert isinstance(a , a ) and isinstance(a , a ) try: with open(a ) as fin, open("decrypt.out" , "w+" ) as fout: # actual encrypt-process for line in fin: fout.write(self.decrypt_string(a , a ) ) except OSError: return False return True # Tests # crypt = XORCipher() # key = 67 # # test encrypt # print(crypt.encrypt("hallo welt",key)) # # test decrypt # print(crypt.decrypt(crypt.encrypt("hallo welt",key), key)) # # test encrypt_string # print(crypt.encrypt_string("hallo welt",key)) # # test decrypt_string # print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)) # if (crypt.encrypt_file("test.txt",key)): # print("encrypt successful") # else: # print("encrypt unsuccessful") # if (crypt.decrypt_file("encrypt.out",key)): # print("decrypt successful") # else: # print("decrypt unsuccessful")
76
def lowerCamelCase__ ( _a , _a): return int((input_a, input_a).count(1) != 0) def lowerCamelCase__ ( ): assert or_gate(0 , 0) == 0 assert or_gate(0 , 1) == 1 assert or_gate(1 , 0) == 1 assert or_gate(1 , 1) == 1 if __name__ == "__main__": print(or_gate(0, 1)) print(or_gate(1, 0)) print(or_gate(0, 0)) print(or_gate(1, 1))
76
1
a_ = {str(digit): digit**5 for digit in range(10)} def lowerCamelCase__ ( _a): return sum(DIGITS_FIFTH_POWER[digit] for digit in str(_a)) def lowerCamelCase__ ( ): return sum( number for number in range(1000 , 1000000) if number == digits_fifth_powers_sum(_a)) if __name__ == "__main__": print(solution())
76
a_ = 8.314_4598 def lowerCamelCase__ ( _a , _a): if temperature < 0: raise Exception("Temperature cannot be less than 0 K") if molar_mass <= 0: raise Exception("Molar mass cannot be less than or equal to 0 kg/mol") else: return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5 if __name__ == "__main__": import doctest # run doctest doctest.testmod() # example a_ = 300 a_ = 28 a_ = rms_speed_of_molecule(temperature, molar_mass) print(F'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
76
1
import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import SchedulerMixin, SchedulerOutput class _UpperCamelCase ( __A , __A ): '''simple docstring''' lowerCamelCase__ =1 @register_to_config def __init__( self : Dict , a : int = 1000 , a : Optional[Union[np.ndarray, List[float]]] = None ) -> Any: """simple docstring""" self.set_timesteps(a ) # standard deviation of the initial noise distribution SCREAMING_SNAKE_CASE : int = 1.0 # For now we only support F-PNDM, i.e. the runge-kutta method # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf # mainly at formula (9), (12), (13) and the Algorithm 2. SCREAMING_SNAKE_CASE : str = 4 # running values SCREAMING_SNAKE_CASE : Optional[int] = [] def __UpperCamelCase ( self : Tuple , a : int , a : Union[str, torch.device] = None ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = num_inference_steps SCREAMING_SNAKE_CASE : Tuple = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1] SCREAMING_SNAKE_CASE : Dict = torch.cat([steps, torch.tensor([0.0] )] ) if self.config.trained_betas is not None: SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(self.config.trained_betas , dtype=torch.floataa ) else: SCREAMING_SNAKE_CASE : Tuple = torch.sin(steps * math.pi / 2 ) ** 2 SCREAMING_SNAKE_CASE : Any = (1.0 - self.betas**2) ** 0.5 SCREAMING_SNAKE_CASE : List[Any] = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1] SCREAMING_SNAKE_CASE : Optional[Any] = timesteps.to(a ) SCREAMING_SNAKE_CASE : str = [] def __UpperCamelCase ( self : int , a : torch.FloatTensor , a : int , a : torch.FloatTensor , a : bool = True , ) -> Union[SchedulerOutput, Tuple]: """simple docstring""" if self.num_inference_steps is None: raise ValueError( "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the 
scheduler" ) SCREAMING_SNAKE_CASE : Dict = (self.timesteps == timestep).nonzero().item() SCREAMING_SNAKE_CASE : Tuple = timestep_index + 1 SCREAMING_SNAKE_CASE : Union[str, Any] = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index] self.ets.append(a ) if len(self.ets ) == 1: SCREAMING_SNAKE_CASE : List[Any] = self.ets[-1] elif len(self.ets ) == 2: SCREAMING_SNAKE_CASE : Tuple = (3 * self.ets[-1] - self.ets[-2]) / 2 elif len(self.ets ) == 3: SCREAMING_SNAKE_CASE : int = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12 else: SCREAMING_SNAKE_CASE : Optional[int] = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4]) SCREAMING_SNAKE_CASE : List[str] = self._get_prev_sample(a , a , a , a ) if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=a ) def __UpperCamelCase ( self : Optional[int] , a : torch.FloatTensor , *a : Union[str, Any] , **a : Union[str, Any] ) -> torch.FloatTensor: """simple docstring""" return sample def __UpperCamelCase ( self : List[Any] , a : str , a : Tuple , a : Any , a : Union[str, Any] ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE : Dict = self.alphas[timestep_index] SCREAMING_SNAKE_CASE : Any = self.betas[timestep_index] SCREAMING_SNAKE_CASE : str = self.alphas[prev_timestep_index] SCREAMING_SNAKE_CASE : str = self.betas[prev_timestep_index] SCREAMING_SNAKE_CASE : List[Any] = (sample - sigma * ets) / max(a , 1e-8 ) SCREAMING_SNAKE_CASE : Optional[int] = next_alpha * pred + ets * next_sigma return prev_sample def __len__( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" return self.config.num_train_timesteps
76
a_ = { 'A': ['B', 'C', 'E'], 'B': ['A', 'D', 'E'], 'C': ['A', 'F', 'G'], 'D': ['B'], 'E': ['A', 'B', 'D'], 'F': ['C'], 'G': ['C'], } def lowerCamelCase__ ( _a , _a , _a): SCREAMING_SNAKE_CASE : int = set() # keep track of all the paths to be checked SCREAMING_SNAKE_CASE : int = [[start]] # return path if start is goal if start == goal: return [start] # keeps looping until all possible paths have been checked while queue: # pop the first path from the queue SCREAMING_SNAKE_CASE : Optional[int] = queue.pop(0) # get the last node from the path SCREAMING_SNAKE_CASE : Union[str, Any] = path[-1] if node not in explored: SCREAMING_SNAKE_CASE : List[str] = graph[node] # go through all neighbour nodes, construct a new path and # push it into the queue for neighbour in neighbours: SCREAMING_SNAKE_CASE : List[Any] = list(_a) new_path.append(_a) queue.append(_a) # return path if neighbour is goal if neighbour == goal: return new_path # mark node as explored explored.add(_a) # in case there's no path between the 2 nodes return [] def lowerCamelCase__ ( _a , _a , _a): if not graph or start not in graph or target not in graph: return -1 if start == target: return 0 SCREAMING_SNAKE_CASE : str = [start] SCREAMING_SNAKE_CASE : Optional[Any] = set(_a) # Keep tab on distances from `start` node. SCREAMING_SNAKE_CASE : Union[str, Any] = {start: 0, target: -1} while queue: SCREAMING_SNAKE_CASE : Optional[int] = queue.pop(0) if node == target: SCREAMING_SNAKE_CASE : Union[str, Any] = ( dist[node] if dist[target] == -1 else min(dist[target] , dist[node]) ) for adjacent in graph[node]: if adjacent not in visited: visited.add(_a) queue.append(_a) SCREAMING_SNAKE_CASE : Optional[Any] = dist[node] + 1 return dist[target] if __name__ == "__main__": print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D'] print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
76
1
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class _UpperCamelCase ( __A ): '''simple docstring''' lowerCamelCase__ ='naver-clova-ix/donut-base-finetuned-docvqa' lowerCamelCase__ =( 'This is a tool that answers a question about an document (pdf). It takes an input named `document` which ' 'should be the document containing the information, as well as a `question` that is the question about the ' 'document. It returns a text that contains the answer to the question.' ) lowerCamelCase__ ='document_qa' lowerCamelCase__ =AutoProcessor lowerCamelCase__ =VisionEncoderDecoderModel lowerCamelCase__ =['image', 'text'] lowerCamelCase__ =['text'] def __init__( self : List[str] , *a : List[Any] , **a : Any ) -> Optional[int]: """simple docstring""" if not is_vision_available(): raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool." 
) super().__init__(*a , **a ) def __UpperCamelCase ( self : List[Any] , a : "Image" , a : str ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = "<s_docvqa><s_question>{user_input}</s_question><s_answer>" SCREAMING_SNAKE_CASE : Any = task_prompt.replace("{user_input}" , a ) SCREAMING_SNAKE_CASE : List[Any] = self.pre_processor.tokenizer( a , add_special_tokens=a , return_tensors="pt" ).input_ids SCREAMING_SNAKE_CASE : Optional[Any] = self.pre_processor(a , return_tensors="pt" ).pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def __UpperCamelCase ( self : List[Any] , a : Any ) -> int: """simple docstring""" return self.model.generate( inputs["pixel_values"].to(self.device ) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=a , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=a , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=a , ).sequences def __UpperCamelCase ( self : Tuple , a : Optional[Any] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE : Any = self.pre_processor.batch_decode(a )[0] SCREAMING_SNAKE_CASE : Dict = sequence.replace(self.pre_processor.tokenizer.eos_token , "" ) SCREAMING_SNAKE_CASE : int = sequence.replace(self.pre_processor.tokenizer.pad_token , "" ) SCREAMING_SNAKE_CASE : List[Any] = re.sub(R"<.*?>" , "" , a , count=1 ).strip() # remove first task start token SCREAMING_SNAKE_CASE : Any = self.pre_processor.tokenajson(a ) return sequence["answer"]
76
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' @slow def __UpperCamelCase ( self : str ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" ) SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained("google/mt5-small" ) SCREAMING_SNAKE_CASE : Tuple = tokenizer("Hello there" , return_tensors="tf" ).input_ids SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer("Hi I am" , return_tensors="tf" ).input_ids SCREAMING_SNAKE_CASE : str = model(a , labels=a ).loss SCREAMING_SNAKE_CASE : Any = -tf.math.reduce_mean(a ).numpy() SCREAMING_SNAKE_CASE : Union[str, Any] = -21.22_8168 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
76
1
import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import ( DiffusionPipeline, UnCLIPImageVariationPipeline, UnCLIPScheduler, UNetaDConditionModel, UNetaDModel, ) from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel from diffusers.utils import floats_tensor, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _UpperCamelCase ( __A , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =UnCLIPImageVariationPipeline lowerCamelCase__ =IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'} lowerCamelCase__ =IMAGE_VARIATION_BATCH_PARAMS lowerCamelCase__ =[ 'generator', 'return_dict', 'decoder_num_inference_steps', 'super_res_num_inference_steps', ] lowerCamelCase__ =False @property def __UpperCamelCase ( self : int ) -> List[Any]: """simple docstring""" return 32 @property def __UpperCamelCase ( self : Dict ) -> int: """simple docstring""" return 32 @property def __UpperCamelCase ( self : List[str] ) -> Dict: """simple docstring""" return self.time_input_dim @property def __UpperCamelCase ( self : Tuple ) -> Union[str, Any]: """simple docstring""" return self.time_input_dim * 4 @property def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]: """simple docstring""" return 100 @property def __UpperCamelCase ( self : Optional[Any] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) return tokenizer @property def __UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]: 
"""simple docstring""" torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(a ) @property def __UpperCamelCase ( self : Optional[int] ) -> Dict: """simple docstring""" torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : str = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) return CLIPVisionModelWithProjection(a ) @property def __UpperCamelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : List[str] = { "clip_embeddings_dim": self.text_embedder_hidden_size, "time_embed_dim": self.time_embed_dim, "cross_attention_dim": self.cross_attention_dim, } SCREAMING_SNAKE_CASE : Dict = UnCLIPTextProjModel(**a ) return model @property def __UpperCamelCase ( self : int ) -> str: """simple docstring""" torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : str = { "sample_size": 32, # RGB in channels "in_channels": 3, # Out channels is double in channels because predicts mean and variance "out_channels": 6, "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": "identity", } SCREAMING_SNAKE_CASE : int = UNetaDConditionModel(**a ) return model @property def __UpperCamelCase ( self : 
Tuple ) -> int: """simple docstring""" return { "sample_size": 64, "layers_per_block": 1, "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"), "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"), "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "in_channels": 6, "out_channels": 3, } @property def __UpperCamelCase ( self : Dict ) -> Optional[Any]: """simple docstring""" torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[Any] = UNetaDModel(**self.dummy_super_res_kwargs ) return model @property def __UpperCamelCase ( self : Any ) -> Optional[Any]: """simple docstring""" torch.manual_seed(1 ) SCREAMING_SNAKE_CASE : Tuple = UNetaDModel(**self.dummy_super_res_kwargs ) return model def __UpperCamelCase ( self : int ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE : Any = self.dummy_decoder SCREAMING_SNAKE_CASE : List[str] = self.dummy_text_proj SCREAMING_SNAKE_CASE : int = self.dummy_text_encoder SCREAMING_SNAKE_CASE : int = self.dummy_tokenizer SCREAMING_SNAKE_CASE : str = self.dummy_super_res_first SCREAMING_SNAKE_CASE : List[Any] = self.dummy_super_res_last SCREAMING_SNAKE_CASE : str = UnCLIPScheduler( variance_type="learned_range" , prediction_type="epsilon" , num_train_timesteps=1000 , ) SCREAMING_SNAKE_CASE : str = UnCLIPScheduler( variance_type="fixed_small_log" , prediction_type="epsilon" , num_train_timesteps=1000 , ) SCREAMING_SNAKE_CASE : List[str] = CLIPImageProcessor(crop_size=32 , size=32 ) SCREAMING_SNAKE_CASE : Dict = self.dummy_image_encoder return { "decoder": decoder, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_proj": text_proj, "feature_extractor": feature_extractor, "image_encoder": image_encoder, "super_res_first": super_res_first, "super_res_last": super_res_last, "decoder_scheduler": decoder_scheduler, "super_res_scheduler": super_res_scheduler, } def __UpperCamelCase ( self : Any , a : str , a : Union[str, Any]=0 , a : Tuple=True ) -> Any: 
"""simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(a ) ).to(a ) if str(a ).startswith("mps" ): SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(a ) else: SCREAMING_SNAKE_CASE : int = torch.Generator(device=a ).manual_seed(a ) if pil_image: SCREAMING_SNAKE_CASE : Dict = input_image * 0.5 + 0.5 SCREAMING_SNAKE_CASE : List[Any] = input_image.clamp(0 , 1 ) SCREAMING_SNAKE_CASE : Union[str, Any] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() SCREAMING_SNAKE_CASE : Tuple = DiffusionPipeline.numpy_to_pil(a )[0] return { "image": input_image, "generator": generator, "decoder_num_inference_steps": 2, "super_res_num_inference_steps": 2, "output_type": "np", } def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = "cpu" SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE : List[str] = self.pipeline_class(**a ) SCREAMING_SNAKE_CASE : int = pipe.to(a ) pipe.set_progress_bar_config(disable=a ) SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(a , pil_image=a ) SCREAMING_SNAKE_CASE : Dict = pipe(**a ) SCREAMING_SNAKE_CASE : Any = output.images SCREAMING_SNAKE_CASE : int = self.get_dummy_inputs(a , pil_image=a ) SCREAMING_SNAKE_CASE : int = pipe( **a , return_dict=a , )[0] SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE : List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) SCREAMING_SNAKE_CASE : Union[str, Any] = np.array( [ 0.9997, 0.0002, 0.9997, 0.9997, 0.9969, 0.0023, 0.9997, 0.9969, 0.9970, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def __UpperCamelCase ( self : int ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = "cpu" SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components() 
SCREAMING_SNAKE_CASE : List[str] = self.pipeline_class(**a ) SCREAMING_SNAKE_CASE : str = pipe.to(a ) pipe.set_progress_bar_config(disable=a ) SCREAMING_SNAKE_CASE : Any = self.get_dummy_inputs(a , pil_image=a ) SCREAMING_SNAKE_CASE : Optional[int] = pipe(**a ) SCREAMING_SNAKE_CASE : Optional[Any] = output.images SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(a , pil_image=a ) SCREAMING_SNAKE_CASE : Dict = pipe( **a , return_dict=a , )[0] SCREAMING_SNAKE_CASE : List[str] = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE : Any = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) SCREAMING_SNAKE_CASE : int = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def __UpperCamelCase ( self : List[str] ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE : int = "cpu" SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE : List[str] = self.pipeline_class(**a ) SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.to(a ) pipe.set_progress_bar_config(disable=a ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(a , pil_image=a ) SCREAMING_SNAKE_CASE : str = [ pipeline_inputs["image"], pipeline_inputs["image"], ] SCREAMING_SNAKE_CASE : Dict = pipe(**a ) SCREAMING_SNAKE_CASE : Optional[int] = output.images SCREAMING_SNAKE_CASE : int = self.get_dummy_inputs(a , pil_image=a ) SCREAMING_SNAKE_CASE : str = [ tuple_pipeline_inputs["image"], tuple_pipeline_inputs["image"], ] SCREAMING_SNAKE_CASE : List[str] = pipe( **a , return_dict=a , )[0] SCREAMING_SNAKE_CASE : Optional[int] = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE : Tuple = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (2, 64, 64, 3) SCREAMING_SNAKE_CASE : List[Any] = np.array( [ 0.9997, 0.9989, 0.0008, 0.0021, 0.9960, 0.0018, 0.0014, 0.0002, 0.9933, ] ) assert 
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Any = torch.device("cpu" ) class _UpperCamelCase : '''simple docstring''' lowerCamelCase__ =1 SCREAMING_SNAKE_CASE : str = self.get_dummy_components() SCREAMING_SNAKE_CASE : Optional[int] = self.pipeline_class(**a ) SCREAMING_SNAKE_CASE : str = pipe.to(a ) pipe.set_progress_bar_config(disable=a ) SCREAMING_SNAKE_CASE : str = torch.Generator(device=a ).manual_seed(0 ) SCREAMING_SNAKE_CASE : Dict = pipe.decoder.dtype SCREAMING_SNAKE_CASE : List[str] = 1 SCREAMING_SNAKE_CASE : List[str] = ( batch_size, pipe.decoder.config.in_channels, pipe.decoder.config.sample_size, pipe.decoder.config.sample_size, ) SCREAMING_SNAKE_CASE : List[Any] = pipe.prepare_latents( a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() ) SCREAMING_SNAKE_CASE : List[str] = ( batch_size, pipe.super_res_first.config.in_channels // 2, pipe.super_res_first.config.sample_size, pipe.super_res_first.config.sample_size, ) SCREAMING_SNAKE_CASE : int = pipe.prepare_latents( a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() ) SCREAMING_SNAKE_CASE : str = self.get_dummy_inputs(a , pil_image=a ) SCREAMING_SNAKE_CASE : Union[str, Any] = pipe( **a , decoder_latents=a , super_res_latents=a ).images SCREAMING_SNAKE_CASE : str = self.get_dummy_inputs(a , pil_image=a ) # Don't pass image, instead pass embedding SCREAMING_SNAKE_CASE : List[str] = pipeline_inputs.pop("image" ) SCREAMING_SNAKE_CASE : str = pipe.image_encoder(a ).image_embeds SCREAMING_SNAKE_CASE : Optional[Any] = pipe( **a , decoder_latents=a , super_res_latents=a , image_embeddings=a , ).images # make sure passing text embeddings manually is identical assert np.abs(img_out_a - img_out_a ).max() < 1e-4 @skip_mps def __UpperCamelCase ( self : Optional[Any] 
) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = torch_device == "cpu" # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor SCREAMING_SNAKE_CASE : List[Any] = 1e-2 self._test_attention_slicing_forward_pass( test_max_difference=a , expected_max_diff=a ) @skip_mps def __UpperCamelCase ( self : Optional[int] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = torch_device == "cpu" SCREAMING_SNAKE_CASE : Optional[int] = True SCREAMING_SNAKE_CASE : List[Any] = [ "decoder_num_inference_steps", "super_res_num_inference_steps", ] self._test_inference_batch_single_identical( test_max_difference=a , relax_max_difference=a , additional_params_copy_to_batched_inputs=a , ) def __UpperCamelCase ( self : Union[str, Any] ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = [ "decoder_num_inference_steps", "super_res_num_inference_steps", ] if torch_device == "mps": # TODO: MPS errors with larger batch sizes SCREAMING_SNAKE_CASE : List[str] = [2, 3] self._test_inference_batch_consistent( batch_sizes=a , additional_params_copy_to_batched_inputs=a , ) else: self._test_inference_batch_consistent( additional_params_copy_to_batched_inputs=a ) @skip_mps def __UpperCamelCase ( self : Tuple ) -> Any: """simple docstring""" return super().test_dict_tuple_outputs_equivalent() @skip_mps def __UpperCamelCase ( self : List[str] ) -> Dict: """simple docstring""" return super().test_save_load_local() @skip_mps def __UpperCamelCase ( self : Tuple ) -> Dict: """simple docstring""" return super().test_save_load_optional_components() @slow @require_torch_gpu class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __UpperCamelCase ( self : List[Any] ) -> int: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = load_image( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png" ) SCREAMING_SNAKE_CASE : Optional[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/unclip/karlo_v1_alpha_cat_variation_fp16.npy" ) SCREAMING_SNAKE_CASE : str = UnCLIPImageVariationPipeline.from_pretrained( "kakaobrain/karlo-v1-alpha-image-variations" , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE : Tuple = pipeline.to(a ) pipeline.set_progress_bar_config(disable=a ) SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) SCREAMING_SNAKE_CASE : str = pipeline( a , generator=a , output_type="np" , ) SCREAMING_SNAKE_CASE : Union[str, Any] = output.images[0] assert image.shape == (256, 256, 3) assert_mean_pixel_difference(a , a , 15 )
76
from math import factorial


def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the binomial probability of exactly ``successes`` successes.

    Computes ``C(trials, successes) * prob**successes * (1-prob)**(trials-successes)``
    for ``trials`` independent Bernoulli trials each succeeding with
    probability ``prob``.

    :param successes: number of successful outcomes (non-negative int)
    :param trials: total number of trials (non-negative int)
    :param prob: per-trial success probability, strictly between 0 and 1
    :raises ValueError: on non-int counts, negative counts,
        ``successes > trials``, or ``prob`` outside (0, 1)
    """
    # Type check first so the ordering comparisons below never see non-integers.
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Binomial coefficient: n! / (k! * (n - k)!)
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


# Legacy alias preserving the previous (obfuscated) public name.
lowerCamelCase__ = binomial_distribution


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trials")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
76
1
from __future__ import annotations


def rec_insertion_sort(collection: list, n: int) -> None:
    """Sort the first ``n`` items of ``collection`` in place, recursively.

    :param collection: mutable sequence of mutually comparable items
    :param n: length of the prefix to sort (usually ``len(collection)``)
    """
    # Base case: an empty/one-element list (or prefix) is already sorted.
    if len(collection) <= 1 or n <= 1:
        return
    # Settle the element at position n-1, then sort the remaining prefix.
    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    """Repair ordering around ``index`` by swapping adjacent elements."""
    # Stop once we run off the end or the adjacent pair is in order.
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    # Swap the out-of-order neighbours and continue rightwards.
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)


# Legacy alias preserving the previous (obfuscated) public name.
lowerCamelCase__ = insert_next


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
76
from transformers import BertTokenizerFast

from .custom_tokenization import CustomTokenizer


class _UpperCamelCase(__A):
    """Fast tokenizer stub paired with the project's slow ``CustomTokenizer``.

    NOTE(review): identifiers are machine-obfuscated. The base class ``__A``
    is presumably ``BertTokenizerFast`` (imported above), and the class
    attribute presumably ``slow_tokenizer_class`` — confirm against the
    original file before relying on this.
    """

    # Slow tokenizer class this fast tokenizer can be converted from.
    lowerCamelCase__ = CustomTokenizer
    pass
76
1
# Pinned dependency table: pip package name -> pip requirement specifier.
# NOTE(review): resembles an auto-generated dependency-versions table
# (e.g. diffusers' dependency_versions_table.py) — confirm before editing
# by hand, as such files are usually regenerated from setup.py.
a_ = {
    'Pillow': 'Pillow',
    'accelerate': 'accelerate>=0.11.0',
    'compel': 'compel==0.1.8',
    'black': 'black~=23.1',
    'datasets': 'datasets',
    'filelock': 'filelock',
    'flax': 'flax>=0.4.1',
    'hf-doc-builder': 'hf-doc-builder>=0.3.0',
    'huggingface-hub': 'huggingface-hub>=0.13.2',
    'requests-mock': 'requests-mock==1.10.0',
    'importlib_metadata': 'importlib_metadata',
    'invisible-watermark': 'invisible-watermark',
    'isort': 'isort>=5.5.4',
    'jax': 'jax>=0.2.8,!=0.3.2',
    'jaxlib': 'jaxlib>=0.1.65',
    'Jinja2': 'Jinja2',
    'k-diffusion': 'k-diffusion>=0.0.12',
    'torchsde': 'torchsde',
    'note_seq': 'note_seq',
    'librosa': 'librosa',
    'numpy': 'numpy',
    'omegaconf': 'omegaconf',
    'parameterized': 'parameterized',
    'protobuf': 'protobuf>=3.20.3,<4',
    'pytest': 'pytest',
    'pytest-timeout': 'pytest-timeout',
    'pytest-xdist': 'pytest-xdist',
    'ruff': 'ruff>=0.0.241',
    'safetensors': 'safetensors',
    'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
    'scipy': 'scipy',
    'onnx': 'onnx',
    'regex': 'regex!=2019.12.17',
    'requests': 'requests',
    'tensorboard': 'tensorboard',
    'torch': 'torch>=1.4',
    'torchvision': 'torchvision',
    'transformers': 'transformers>=4.25.1',
    'urllib3': 'urllib3<=2.0.0',
}
76
# NOTE(review): identifiers in this file are machine-obfuscated — every
# method is named ``__UpperCamelCase``, every local is assigned to
# ``SCREAMING_SNAKE_CASE``, and parameters repeat the name ``a`` (which is
# a SyntaxError). The comments below describe the apparent intent — a
# Ray-based distributed retriever for RAG fine-tuning — but the code
# cannot run as-is; the original names must be recovered first.
import logging
import random

import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex

a_ = logging.getLogger(__name__)


class _UpperCamelCase:
    """Helper that lazily builds a ``RagRetriever`` inside a Ray actor."""

    def __init__(self: Any) -> str:
        """Start uninitialized; the retriever is created on first use."""
        # presumably ``self.initialized = False`` — TODO confirm
        SCREAMING_SNAKE_CASE: Dict = False

    def __UpperCamelCase(self: str, a: str, a: Optional[int], a: Any, a: str) -> List[Any]:
        """Create the wrapped ``RagRetriever`` once, then mark as initialized."""
        if not self.initialized:
            SCREAMING_SNAKE_CASE: List[str] = RagRetriever(
                a,
                question_encoder_tokenizer=a,
                generator_tokenizer=a,
                index=a,
                init_retrieval=a,
            )
            SCREAMING_SNAKE_CASE: Optional[int] = True

    def __UpperCamelCase(self: List[str]) -> Optional[int]:
        """Load the retrieval index inside this actor process."""
        self.retriever.index.init_index()

    def __UpperCamelCase(self: Optional[Any], a: List[Any], a: Any) -> int:
        """Run retrieval and return (doc_ids, retrieved_doc_embeds)."""
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE: Dict = self.retriever._main_retrieve(a, a)
        return doc_ids, retrieved_doc_embeds


class _UpperCamelCase(__A):
    """Distributed retriever that fans retrieval out to Ray actor workers.

    NOTE(review): the base class ``__A`` is presumably ``RagRetriever``
    (imported above) — confirm against the original source.
    """

    def __init__(self: Tuple, a: Any, a: Tuple, a: Tuple, a: Tuple, a: List[Any] = None) -> Optional[int]:
        """Validate the index setup, then register retrieval workers."""
        # With Ray, dataset/index must be loaded per worker from paths,
        # never passed in pre-initialized.
        if index is not None and index.is_initialized() and len(a) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            a,
            question_encoder_tokenizer=a,
            generator_tokenizer=a,
            index=a,
            init_retrieval=a,
        )
        SCREAMING_SNAKE_CASE: Optional[Any] = retrieval_workers
        if len(self.retrieval_workers) > 0:
            # Build one RagRetriever inside every remote worker.
            ray.get(
                [
                    worker.create_rag_retriever.remote(a, a, a, a)
                    for worker in self.retrieval_workers
                ]
            )

    def __UpperCamelCase(self: Any) -> Dict:
        """Initialize the index — remotely on workers, or locally if none."""
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def __UpperCamelCase(self: Tuple, a: Optional[int], a: Any) -> int:
        """Retrieve documents via a randomly chosen worker (or locally)."""
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            SCREAMING_SNAKE_CASE: Optional[Any] = self.retrieval_workers[
                random.randint(0, len(self.retrieval_workers) - 1)
            ]
            SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE: str = ray.get(random_worker.retrieve.remote(a, a))
        else:
            SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE: Any = self._main_retrieve(a, a)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(a)

    @classmethod
    def __UpperCamelCase(cls: str, a: Optional[Any], a: Any = None, **a: List[Any]) -> str:
        """Delegate tokenizer loading to the parent class."""
        return super(a, cls).get_tokenizers(a, a, **a)

    @classmethod
    def __UpperCamelCase(cls: Union[str, Any], a: int, a: Any, a: List[Any] = None, **a: Optional[Any]) -> str:
        """Build a distributed retriever from a pretrained checkpoint."""
        SCREAMING_SNAKE_CASE: str = kwargs.pop("config", a) or RagConfig.from_pretrained(a, **a)
        SCREAMING_SNAKE_CASE: List[Any] = RagTokenizer.from_pretrained(a, config=a)
        SCREAMING_SNAKE_CASE: List[Any] = rag_tokenizer.question_encoder
        SCREAMING_SNAKE_CASE: List[Any] = rag_tokenizer.generator
        if indexed_dataset is not None:
            # Caller supplied a dataset -> wrap it in a custom HF index.
            SCREAMING_SNAKE_CASE: str = "custom"
            SCREAMING_SNAKE_CASE: List[Any] = CustomHFIndex(config.retrieval_vector_size, a)
        else:
            SCREAMING_SNAKE_CASE: List[str] = cls._build_index(a)
        return cls(
            a,
            question_encoder_tokenizer=a,
            generator_tokenizer=a,
            retrieval_workers=a,
            index=a,
        )
76
1
# NOTE(review): identifiers in this file are machine-obfuscated — dataclass
# fields are all named ``lowerCamelCase__``, methods/locals are destroyed,
# and parameters repeat the name ``a`` (a SyntaxError). Comments below
# describe the apparent intent — a cached SQuAD ``torch`` Dataset — but the
# code cannot run as-is until the original names are recovered.
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union

import torch
from filelock import FileLock
from torch.utils.data import Dataset

from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features

a_ = logging.get_logger(__name__)
a_ = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
# NOTE(review): references MODEL_CONFIG_CLASSES, but obfuscation renamed
# every module-level binding to ``a_`` — broken as written.
a_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class _UpperCamelCase:
    """Training arguments specific to SQuAD data preprocessing.

    Field names were obfuscated; the ``help`` strings identify them as
    (in order): model_type, data_dir, max_seq_length, doc_stride,
    max_query_length, max_answer_length, overwrite_cache,
    version_2_with_negative, null_score_diff_threshold, n_best_size,
    lang_id, threads — confirm against the original file.
    """

    lowerCamelCase__ = field(
        default=__A, metadata={'help': 'Model type selected in the list: ' + ', '.join(__A)}
    )
    lowerCamelCase__ = field(
        default=__A, metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'}
    )
    lowerCamelCase__ = field(
        default=128,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        },
    )
    lowerCamelCase__ = field(
        default=128,
        metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'},
    )
    lowerCamelCase__ = field(
        default=64,
        metadata={
            'help': (
                'The maximum number of tokens for the question. Questions longer than this will '
                'be truncated to this length.'
            )
        },
    )
    lowerCamelCase__ = field(
        default=30,
        metadata={
            'help': (
                'The maximum length of an answer that can be generated. This is needed because the start '
                'and end predictions are not conditioned on one another.'
            )
        },
    )
    lowerCamelCase__ = field(
        default=__A, metadata={'help': 'Overwrite the cached training and evaluation sets'}
    )
    lowerCamelCase__ = field(
        default=__A, metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'}
    )
    lowerCamelCase__ = field(
        default=0.0, metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'}
    )
    lowerCamelCase__ = field(
        default=20, metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'}
    )
    lowerCamelCase__ = field(
        default=0,
        metadata={
            'help': (
                'language id of input for language-specific xlm models (see'
                ' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
            )
        },
    )
    lowerCamelCase__ = field(default=1, metadata={'help': 'multiple threads for converting example to features'})


class _UpperCamelCase(__A):
    """Dataset split selector — presumably an Enum with train/dev members."""

    lowerCamelCase__ = 'train'
    lowerCamelCase__ = 'dev'


class _UpperCamelCase(__A):
    """SQuAD dataset backed by a feature cache on disk.

    Converts SQuAD examples to features on first use, caches them with a
    file lock, and serves per-example tensor dicts via ``__getitem__``.
    """

    # Placeholder class attributes left by the obfuscator (originally
    # typed attribute declarations).
    lowerCamelCase__ = 42
    lowerCamelCase__ = 42
    lowerCamelCase__ = 42
    lowerCamelCase__ = 42

    def __init__(
        self: List[str],
        a: SquadDataTrainingArguments,
        a: PreTrainedTokenizer,
        a: Optional[int] = None,
        a: Union[str, Split] = Split.train,
        a: Optional[bool] = False,
        a: Optional[str] = None,
        a: Optional[str] = "pt",
    ) -> str:
        """Load features from the on-disk cache, or build and cache them."""
        SCREAMING_SNAKE_CASE: Optional[int] = args
        SCREAMING_SNAKE_CASE: Union[str, Any] = is_language_sensitive
        # V2 processor handles unanswerable questions.
        SCREAMING_SNAKE_CASE: Any = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
        if isinstance(a, a):
            try:
                SCREAMING_SNAKE_CASE: Optional[int] = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        SCREAMING_SNAKE_CASE: List[str] = mode
        # Load data features from cache or dataset file
        SCREAMING_SNAKE_CASE: Optional[int] = "v2" if args.version_2_with_negative else "v1"
        SCREAMING_SNAKE_CASE: Optional[int] = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            F"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )
        # Make sure only the first process in distributed training processes
        # the dataset, and the others will use the cache.
        SCREAMING_SNAKE_CASE: Optional[Any] = cached_features_file + ".lock"
        with FileLock(a):
            if os.path.exists(a) and not args.overwrite_cache:
                SCREAMING_SNAKE_CASE: str = time.time()
                SCREAMING_SNAKE_CASE: Optional[int] = torch.load(a)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                SCREAMING_SNAKE_CASE: List[str] = self.old_features["features"]
                SCREAMING_SNAKE_CASE: List[str] = self.old_features.get("dataset", a)
                SCREAMING_SNAKE_CASE: List[str] = self.old_features.get("examples", a)
                logger.info(
                    F"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        F"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                # Cache miss: read raw examples for the requested split...
                if mode == Split.dev:
                    SCREAMING_SNAKE_CASE: List[str] = self.processor.get_dev_examples(args.data_dir)
                else:
                    SCREAMING_SNAKE_CASE: List[str] = self.processor.get_train_examples(args.data_dir)
                # ...convert them to model features...
                SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE: List[str] = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=a,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=a,
                )
                # ...and persist everything for the next run.
                SCREAMING_SNAKE_CASE: List[Any] = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    a,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self: str) -> Any:
        """Number of cached features (one per chunked example)."""
        return len(self.features)

    def __getitem__(self: Tuple, a: Dict) -> Dict[str, torch.Tensor]:
        """Materialize feature ``i`` as a dict of tensors for the model."""
        SCREAMING_SNAKE_CASE: Optional[int] = self.features[i]
        SCREAMING_SNAKE_CASE: Dict = torch.tensor(feature.input_ids, dtype=torch.long)
        SCREAMING_SNAKE_CASE: Optional[Any] = torch.tensor(feature.attention_mask, dtype=torch.long)
        SCREAMING_SNAKE_CASE: Union[str, Any] = torch.tensor(feature.token_type_ids, dtype=torch.long)
        SCREAMING_SNAKE_CASE: List[str] = torch.tensor(feature.cls_index, dtype=torch.long)
        SCREAMING_SNAKE_CASE: Optional[int] = torch.tensor(feature.p_mask, dtype=torch.float)
        SCREAMING_SNAKE_CASE: int = torch.tensor(feature.is_impossible, dtype=torch.float)
        SCREAMING_SNAKE_CASE: Dict = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
        # Some model families do not use token type ids.
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        # XLNet/XLM additionally consume cls_index and p_mask.
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
        if self.args.version_2_with_negative:
            inputs.update({"is_impossible": is_impossible})
        if self.is_language_sensitive:
            inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.intaa) * self.args.lang_id)})
        # Gold answer spans are only available for the training split.
        if self.mode == Split.train:
            SCREAMING_SNAKE_CASE: int = torch.tensor(feature.start_position, dtype=torch.long)
            SCREAMING_SNAKE_CASE: Tuple = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})
        return inputs
76
from typing import Any


class Node:
    """A single element of a singly linked list."""

    def __init__(self, data: Any) -> None:
        self.data = data  # payload
        self.next = None  # reference to the next node; None marks the tail

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    """Singly linked list with indexing, insertion, deletion and reversal."""

    def __init__(self) -> None:
        self.head = None  # first node, or None when the list is empty

    def __iter__(self) -> Any:
        """Yield the data of each node, head to tail."""
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        """Number of nodes (O(n) — walks the whole list)."""
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        """String form like ``1->2->3`` (empty list gives '')."""
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        """Return the data at ``index``; raises ValueError when out of range."""
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        """Overwrite the data at ``index``; raises ValueError when out of range."""
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        """Append ``data`` at the end of the list."""
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        """Prepend ``data`` at the front of the list."""
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        """Insert ``data`` before position ``index`` (0..len inclusive)."""
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        """Remove and return the first element; IndexError when empty."""
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        """Remove and return the last element; IndexError when empty."""
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        """Remove and return the element at ``index``; IndexError when invalid."""
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        """Reverse the list in place in O(n)."""
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev


# Legacy aliases preserving the previous (obfuscated) public names.
_UpperCamelCase = LinkedList


def test_singly_linked_list() -> None:
    """Exercise the basic integer API of LinkedList."""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))


def test_singly_linked_list_2() -> None:
    """Exercise LinkedList with heterogeneous payloads."""
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list)
        == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list)
        == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list)
        == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list)
        == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main() -> None:
    """Interactive demo of the linked list operations."""
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


# Legacy alias preserving the previous (obfuscated) public name.
lowerCamelCase__ = main


if __name__ == "__main__":
    main()
76
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Import structure for the lazily-loaded time_series_transformer package:
# submodule name -> list of public names it exports.
# NOTE(review): obfuscation renamed the module-level bindings to ``a_``,
# yet ``_import_structure`` is referenced at the bottom — the original
# names must be restored for this file to work.
a_ = {
    'configuration_time_series_transformer': [
        'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'TimeSeriesTransformerConfig',
    ],
}

# Modeling symbols are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ = [
        'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TimeSeriesTransformerForPrediction',
        'TimeSeriesTransformerModel',
        'TimeSeriesTransformerPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports...
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    # ...while at runtime the module object is replaced by a lazy loader
    # that imports submodules on first attribute access.
    a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
76
import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger

logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    """How thoroughly downloaded/generated data should be verified."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Base error for download-checksum verification failures."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """A downloaded file was not present in the expected checksums."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some expected files were never downloaded."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """A downloaded file's checksum differs from the expected one."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None) -> None:
    """Compare recorded download checksums against the expected ones.

    :param expected_checksums: url -> checksum dict, or None to skip
    :param recorded_checksums: url -> checksum dict actually observed
    :param verification_name: optional label used in log/error messages
    :raises ExpectedMoreDownloadedFiles: expected urls missing from recorded
    :raises UnexpectedDownloadedFile: recorded urls missing from expected
    :raises NonMatchingChecksumError: any checksum mismatch
    """
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Base error for dataset-split verification failures."""


class UnexpectedSplits(SplitsVerificationException):
    """A recorded split was not present in the expected splits."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some expected splits were never recorded."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """A recorded split's example count differs from the expected one."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict) -> None:
    """Compare recorded split sizes against the expected split metadata.

    :raises ExpectedMoreSplits / UnexpectedSplits: split-name mismatches
    :raises NonMatchingSplitsSizesError: any num_examples mismatch
    """
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Return ``{"num_bytes": ..., "checksum": ...}`` for a local file.

    The sha256 checksum is computed in 1 MiB chunks to bound memory use;
    it is None when ``record_checksum`` is False.
    """
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size) -> bool:
    """True when the dataset fits under ``config.IN_MEMORY_MAX_SIZE``."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False


# Legacy aliases preserving the previous (obfuscated) final bindings.
_UpperCamelCase = NonMatchingSplitsSizesError
lowerCamelCase__ = is_small_dataset
76
1
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    """Yield paths of .py/.ipynb files under ``top_dir``, skipping
    ``scripts`` and hidden/underscore directories and ``__init__.py``."""
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # Prune in place so os.walk does not descend into excluded dirs.
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    """Markdown prefix for nesting depth ``i``: a heading at depth 0,
    an indented bullet otherwise."""
    return f"{i * ' '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    """Print headings/bullets for each path component new relative to
    ``old_path``; return ``new_path`` for the caller to remember."""
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    """Print a markdown index of all good files under ``top_dir``."""
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


# Legacy alias preserving the previous (obfuscated) public name.
lowerCamelCase__ = print_directory_md


if __name__ == "__main__":
    print_directory_md(".")
76
# NOTE(review): identifiers below are machine-obfuscated — the function's
# two parameters are both named ``_a`` (a SyntaxError) and every local is
# assigned to ``SCREAMING_SNAKE_CASE``. Comments describe the apparent
# intent — converting an official XLM checkpoint to HF format — but the
# code cannot run as-is until the original names are recovered.
import argparse
import json

import numpy
import torch

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging

logging.set_verbosity_info()


def lowerCamelCase__(_a, _a):
    """Convert an official XLM PyTorch dump into a HF-format model folder
    (weights, config json, vocab json)."""
    # Load checkpoint
    SCREAMING_SNAKE_CASE: int = torch.load(_a, map_location="cpu")
    SCREAMING_SNAKE_CASE: Dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    SCREAMING_SNAKE_CASE: Optional[int] = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            SCREAMING_SNAKE_CASE: List[str] = v
        else:
            SCREAMING_SNAKE_CASE: int = v

    SCREAMING_SNAKE_CASE: int = chkpt["params"]
    # Drop tensor-valued params; keep only JSON-serializable config entries.
    SCREAMING_SNAKE_CASE: Union[str, Any] = {
        n: v for n, v in config.items() if not isinstance(_a, (torch.FloatTensor, numpy.ndarray))
    }

    SCREAMING_SNAKE_CASE: List[Any] = chkpt["dico_word2id"]
    # BPE bookkeeping: continuation pieces drop "@@", word-final pieces get "</w>".
    SCREAMING_SNAKE_CASE: List[Any] = {
        s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()
    }

    # Save pytorch-model
    SCREAMING_SNAKE_CASE: Tuple = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    SCREAMING_SNAKE_CASE: Any = pytorch_dump_folder_path + "/" + CONFIG_NAME
    SCREAMING_SNAKE_CASE: Optional[int] = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(_a, _a)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(_a, "w", encoding="utf-8") as f:
        f.write(json.dumps(_a, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_config_dump_path}")
    with open(_a, "w", encoding="utf-8") as f:
        f.write(json.dumps(_a, indent=2) + "\n")


if __name__ == "__main__":
    a_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    a_ = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
76
1
"""Tests for ``datasets.utils.py_utils`` helpers (map_nested, zip_dict,
temp_seed, NestedDataStructure, asdict, iflatmap_unordered)."""
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch

import multiprocess
import numpy as np
import pytest

from datasets.utils.py_utils import (
    NestedDataStructure,
    asdict,
    iflatmap_unordered,
    map_nested,
    temp_seed,
    temporary_assignment,
    zip_dict,
)

from .utils import require_tf, require_torch


def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1


@dataclass
class A:
    # Simple record used by the asdict tests below.
    x: int
    y: str


class PyUtilsTest(TestCase):
    def test_map_nested(self):
        # Inputs covering every nesting shape map_nested supports.
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)

        # Same expectations with multiprocessing enabled.
        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)

        sn1 = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn1_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn1_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        # map_numpy=False applies the function to whole arrays; map_numpy=True
        # maps over the individual array elements.
        self.assertEqual(map_nested(np_sum, sn1, map_numpy=False), expected_map_nested_sn1_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn1, map_numpy=True).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()},
        )
        self.assertEqual(map_nested(np_sum, sn1, map_numpy=False, num_proc=num_proc), expected_map_nested_sn1_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn1, map_numpy=True, num_proc=num_proc).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()},
        )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn1, num_proc=num_proc)

    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)

    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        # The original value must be restored on exit.
        self.assertEqual(foo.my_attr, "bar")


@pytest.mark.parametrize(
    "iterable_length, num_proc, expected_num_proc",
    [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (16, 16, 16),
        (16, 17, 16),
        (17, 16, 16),
    ],
)
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    # map_nested should only spawn a pool when the iterable is long enough
    # (parallel_min_length) and num_proc > 1; otherwise it maps sequentially.
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc


class TempSeedTest(TestCase):
    @require_tf
    def test_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        # Same seed -> same output; without the seed context -> different output.
        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)


@pytest.mark.parametrize("input_data", [{}])
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data


@pytest.mark.parametrize(
    "data, expected_output",
    [
        ({}, []),
        ([], []),
        ("foo", ["foo"]),
        (["foo", "bar"], ["foo", "bar"]),
        ([["foo", "bar"]], ["foo", "bar"]),
        ([[["foo"], ["bar"]]], ["foo", "bar"]),
        ([[["foo"], "bar"]], ["foo", "bar"]),
        ({"a": 1, "b": 2}, [1, 2]),
        ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
        ({"a": {"1": 1}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": [2]}, [1, 2]),
    ],
)
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output


def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    # asdict recurses into dicts and lists of dataclasses.
    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])


def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    # Yields the same content twice, 2 seconds apart, timestamped.
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)


def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
76
def lowerCamelCase__ ( _a , _a): _validate_point(_a) _validate_point(_a) if len(_a) != len(_a): raise ValueError("Both points must be in the same n-dimensional space") return float(sum(abs(a - b) for a, b in zip(_a , _a))) def lowerCamelCase__ ( _a): if point: if isinstance(_a , _a): for item in point: if not isinstance(_a , (int, float)): SCREAMING_SNAKE_CASE : List[Any] = ( "Expected a list of numbers as input, found " f"{type(_a).__name__}" ) raise TypeError(_a) else: SCREAMING_SNAKE_CASE : List[Any] = f"Expected a list of numbers as input, found {type(_a).__name__}" raise TypeError(_a) else: raise ValueError("Missing an input") def lowerCamelCase__ ( _a , _a): _validate_point(_a) _validate_point(_a) if len(_a) != len(_a): raise ValueError("Both points must be in the same n-dimensional space") return float(sum(abs(x - y) for x, y in zip(_a , _a))) if __name__ == "__main__": import doctest doctest.testmod()
76
1
from typing import Any


class Node:
    """A single node of a singly linked list, holding arbitrary data."""

    def __init__(self, data: Any):
        self.data = data
        self.next = None  # reference to the next Node, or None at the tail

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    """A singly linked list supporting indexing, insertion, deletion and reversal."""

    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        # Yields the *data* of each node, head to tail.
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        # "a->b->c" representation of the stored data.
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        """Insert ``data`` so it becomes the node at position ``index``."""
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        """Remove and return the data stored at position ``index``."""
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        """Reverse the list in place by re-pointing each node at its predecessor."""
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev


def test_singly_linked_list() -> None:
    """Exercise the basic list API with integer payloads."""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))


def test_singly_linked_list_2() -> None:
    """Exercise the list with heterogeneous payloads (nodes, strings, None, floats)."""
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.5_5555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list)
        == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main():
    """Interactive demo driving the list from stdin."""
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
76
from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { 'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json', # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn } class _UpperCamelCase ( __A ): '''simple docstring''' lowerCamelCase__ ='vit_msn' def __init__( self : str , a : Tuple=768 , a : Tuple=12 , a : Any=12 , a : int=3072 , a : List[Any]="gelu" , a : Dict=0.0 , a : int=0.0 , a : str=0.02 , a : List[str]=1e-06 , a : List[Any]=224 , a : Union[str, Any]=16 , a : Union[str, Any]=3 , a : Tuple=True , **a : Dict , ) -> List[Any]: """simple docstring""" super().__init__(**a ) SCREAMING_SNAKE_CASE : Dict = hidden_size SCREAMING_SNAKE_CASE : Optional[Any] = num_hidden_layers SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size SCREAMING_SNAKE_CASE : int = hidden_act SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : List[Any] = initializer_range SCREAMING_SNAKE_CASE : int = layer_norm_eps SCREAMING_SNAKE_CASE : Dict = image_size SCREAMING_SNAKE_CASE : Tuple = patch_size SCREAMING_SNAKE_CASE : Optional[int] = num_channels SCREAMING_SNAKE_CASE : List[str] = qkv_bias
76
1
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request

from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version

from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging


COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def get_diffusers_versions():
    """Return the released diffusers versions on PyPI, sorted ascending."""
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))


def init_hf_modules():
    """Create the HF dynamic-modules cache dir and put it on ``sys.path``."""
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name: Union[str, os.PathLike]):
    """Create (recursively) an importable package ``name`` inside the modules cache."""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def get_relative_imports(module_file):
    """Return the list of modules a file relatively imports (``from .x import`` / ``import .x``)."""
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))


def get_relative_import_files(module_file):
    """Return every file transitively reachable from ``module_file`` via relative imports."""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports


def check_imports(filename):
    """Check the current environment provides every absolute import in ``filename``.

    Raises ``ImportError`` listing any missing packages; otherwise returns the
    file's relative imports (so they can be fetched too).
    """
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)


def get_class_in_module(class_name, module_path):
    """Import ``module_path`` from the modules cache and return ``class_name`` (or auto-detect the pipeline)."""
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    """Return the unique user-defined ``DiffusionPipeline`` subclass in ``loaded_module``."""
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class


def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    """Download ``module_file`` (local path, community pipeline name, or Hub repo)
    into the dynamic-modules cache and return its cache-relative path.
    """
    # Download and cache module_file from the repo `pretrained_model_name_or_path` or grab it if it's a local file.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)


def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Fetch a module (see :func:`get_cached_module_file`) and return a class from it."""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
76
import baseaa def lowerCamelCase__ ( _a): return baseaa.aaaencode(string.encode("utf-8")) def lowerCamelCase__ ( _a): return baseaa.aaadecode(_a).decode("utf-8") if __name__ == "__main__": import doctest doctest.testmod()
76
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) a_ = { 'configuration_gpt_bigcode': ['GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTBigCodeConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ 'GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST', 'GPTBigCodeForSequenceClassification', 'GPTBigCodeForTokenClassification', 'GPTBigCodeForCausalLM', 'GPTBigCodeModel', 'GPTBigCodePreTrainedModel', ] if TYPE_CHECKING: from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_bigcode import ( GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, GPTBigCodeModel, GPTBigCodePreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
76
from datetime import datetime as dt import os from github import Github a_ = [ 'good first issue', 'good second issue', 'good difficult issue', 'feature request', 'new model', 'wip', ] def lowerCamelCase__ ( ): SCREAMING_SNAKE_CASE : int = Github(os.environ["GITHUB_TOKEN"]) SCREAMING_SNAKE_CASE : List[str] = g.get_repo("huggingface/transformers") SCREAMING_SNAKE_CASE : Optional[int] = repo.get_issues(state="open") for issue in open_issues: SCREAMING_SNAKE_CASE : List[Any] = sorted([comment for comment in issue.get_comments()] , key=lambda _a: i.created_at , reverse=_a) SCREAMING_SNAKE_CASE : str = comments[0] if len(_a) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels()) ): # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.") issue.edit(state="closed") elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels()) ): # print(f"Would add stale comment to {issue.number}") issue.create_comment( "This issue has been automatically marked as stale because it has not had " "recent activity. If you think this still needs to be addressed " "please comment on this thread.\n\nPlease note that issues that do not follow the " "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) " "are likely to be ignored.") if __name__ == "__main__": main()
76
1
# `datasets` metric wrapper around Unbabel's COMET machine-translation quality
# estimator.
# NOTE(review): this file has been through an identifier-mangling pass — every
# module constant below is rebound to the same name `a_` (each assignment
# overwrites the previous), while the decorator and method bodies still read
# the original names (_DESCRIPTION, _KWARGS_DESCRIPTION, gpus, sources,
# mean_score, ...). Tokens are preserved verbatim; restore from upstream
# before running.
import comet  # From: unbabel-comet
import torch

import datasets

# Module logger (original name was presumably `logger`).
a_ = datasets.logging.get_logger(__name__)

# BibTeX citation for the COMET papers (original name presumably `_CITATION`).
a_ = '\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = "{COMET}: A Neural Framework for {MT} Evaluation",\n author = "Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon",\n booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",\n month = nov,\n year = "2020",\n address = "Online",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",\n pages = "2685--2702",\n}\n'

# Human-readable metric description (original name presumably `_DESCRIPTION`).
a_ = '\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n'

# Keyword-argument documentation shown by `load_metric` (original name
# presumably `_KWARGS_DESCRIPTION`).
a_ = '\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric(\'comet\')\n >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use\n >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]\n >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]\n >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results["scores"]])\n [0.19, 0.92]\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCamelCase ( datasets.Metric ):
    '''COMET metric: scores MT hypotheses against (source, reference) pairs
    using a pretrained neural quality-estimation model.

    NOTE(review): all three methods below were mangled to the same name
    `__UpperCamelCase`, so in this form only the last definition survives on
    the class; they correspond to `_info`, `_download_and_prepare` and
    `_compute` upstream.
    '''

    def __UpperCamelCase ( self : Dict ) -> List[Any]:
        """Describe the metric: inputs are three parallel string sequences
        (`sources`, `predictions`, `references`)."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage="https://unbabel.github.io/COMET/html/index.html" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "sources": datasets.Value("string" , id="sequence" ),
                    "predictions": datasets.Value("string" , id="sequence" ),
                    "references": datasets.Value("string" , id="sequence" ),
                } ) , codebase_urls=["https://github.com/Unbabel/COMET"] , reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ] , )

    def __UpperCamelCase ( self : Any , a : List[Any] ) -> Optional[Any]:
        """Download and load the COMET checkpoint ("wmt20-comet-da" for the
        default config, otherwise the checkpoint named by `self.config_name`).

        NOTE(review): the loaded model is bound to a local variable rather than
        `self.scorer`, which `_compute` reads — mangling damage; confirm
        against the upstream metric before use.
        """
        if self.config_name == "default":
            SCREAMING_SNAKE_CASE : str = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da" ) )
        else:
            SCREAMING_SNAKE_CASE : str = comet.load_from_checkpoint(comet.download_model(self.config_name ) )

    def __UpperCamelCase ( self : Tuple , a : str , a : Any , a : Optional[int] , a : Optional[int]=None , a : Any=False ) -> int:
        """Score each (src, mt, ref) triple with the loaded COMET model and
        return the per-segment scores plus their mean.

        NOTE(review): duplicate parameter names `a` are a SyntaxError in
        Python, and the body reads `gpus`, `sources`, `predictions`,
        `references`, `data`, `mean_score`, `scores` that no longer exist —
        preserved verbatim from the mangled source.
        """
        if gpus is None:
            # Default to one GPU when CUDA is present, otherwise CPU.
            SCREAMING_SNAKE_CASE : Optional[Any] = 1 if torch.cuda.is_available() else 0
        SCREAMING_SNAKE_CASE : List[Any] = {"src": sources, "mt": predictions, "ref": references}
        # Transpose the column dict into a list of per-segment row dicts.
        SCREAMING_SNAKE_CASE : Dict = [dict(zip(a , a ) ) for t in zip(*data.values() )]
        SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Tuple = self.scorer.predict(a , gpus=a , progress_bar=a )
        return {"mean_score": mean_score, "scores": scores}
76
# Video image processor (Vivit/VideoMAE-style): resizes, crops, rescales and
# normalizes batches of video frames into model-ready pixel values.
# NOTE(review): identifier mangling has damaged this file — assignments that
# upstream write to `self.<attr>` are bound to a local `SCREAMING_SNAKE_CASE`,
# every method shares the name `__UpperCamelCase`, parameters are all named
# `a` (duplicate parameter names are a SyntaxError), and the base class `__A`
# is undefined. Tokens preserved verbatim; restore from upstream before use.
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import logging


if is_vision_available():
    import PIL

# Module logger (original name was presumably `logger`).
a_ = logging.get_logger(__name__)


def lowerCamelCase__ ( _a):
    """Coerce the input into a batch of videos: list[list[frame]].

    Accepts a batch of videos, a single video (list of frames) or a single
    frame, and wraps accordingly.
    NOTE(review): the body reads `videos` but the parameter is `_a` —
    mangling damage (upstream function is `make_batched(videos)`).
    """
    if isinstance(_a , (list, tuple)) and isinstance(videos[0] , (list, tuple)) and is_valid_image(videos[0][0]):
        # Already list-of-videos.
        return videos
    elif isinstance(_a , (list, tuple)) and is_valid_image(videos[0]):
        # Single video -> batch of one.
        return [videos]
    elif is_valid_image(_a):
        # Single frame -> batch of one single-frame video.
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")


class _UpperCamelCase ( __A ):
    '''Image processor for video models: per-frame resize / center-crop /
    rescale (with optional offset to [-1, 1]) / normalize.'''

    # Names of the tensors this processor produces.
    lowerCamelCase__ =['pixel_values']

    def __init__( self : Optional[Any] , a : bool = True , a : Dict[str, int] = None , a : PILImageResampling = PILImageResampling.BILINEAR , a : bool = True , a : Dict[str, int] = None , a : bool = True , a : Union[int, float] = 1 / 255 , a : bool = True , a : bool = True , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , **a : Tuple , ) -> None:
        """Store default preprocessing configuration.

        NOTE(review): upstream these assignments target `self.do_resize`,
        `self.size`, etc.; here they are mangled into rebindings of one local.
        """
        super().__init__(**a )
        # Defaults: shortest edge 256 for resize, 224x224 center crop.
        SCREAMING_SNAKE_CASE : Tuple = size if size is not None else {"shortest_edge": 256}
        SCREAMING_SNAKE_CASE : Tuple = get_size_dict(a , default_to_square=a )
        SCREAMING_SNAKE_CASE : List[str] = crop_size if crop_size is not None else {"height": 224, "width": 224}
        SCREAMING_SNAKE_CASE : str = get_size_dict(a , param_name="crop_size" )
        SCREAMING_SNAKE_CASE : Dict = do_resize
        SCREAMING_SNAKE_CASE : List[Any] = size
        SCREAMING_SNAKE_CASE : Optional[int] = do_center_crop
        SCREAMING_SNAKE_CASE : int = crop_size
        SCREAMING_SNAKE_CASE : int = resample
        SCREAMING_SNAKE_CASE : Any = do_rescale
        SCREAMING_SNAKE_CASE : int = rescale_factor
        SCREAMING_SNAKE_CASE : Tuple = offset
        SCREAMING_SNAKE_CASE : str = do_normalize
        # Fall back to the standard ImageNet statistics.
        SCREAMING_SNAKE_CASE : Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        SCREAMING_SNAKE_CASE : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def __UpperCamelCase ( self : Optional[Any] , a : np.ndarray , a : Dict[str, int] , a : PILImageResampling = PILImageResampling.BILINEAR , a : Optional[Union[str, ChannelDimension]] = None , **a : Union[str, Any] , ) -> np.ndarray:
        """Resize one frame; `size` is either {"shortest_edge": n} (aspect
        preserved) or explicit {"height", "width"}."""
        SCREAMING_SNAKE_CASE : Tuple = get_size_dict(a , default_to_square=a )
        if "shortest_edge" in size:
            SCREAMING_SNAKE_CASE : str = get_resize_output_image_size(a , size["shortest_edge"] , default_to_square=a )
        elif "height" in size and "width" in size:
            SCREAMING_SNAKE_CASE : Dict = (size["height"], size["width"])
        else:
            raise ValueError(F"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
        return resize(a , size=a , resample=a , data_format=a , **a )

    def __UpperCamelCase ( self : List[str] , a : np.ndarray , a : Dict[str, int] , a : Optional[Union[str, ChannelDimension]] = None , **a : str , ) -> np.ndarray:
        """Center-crop one frame to size["height"] x size["width"]."""
        SCREAMING_SNAKE_CASE : str = get_size_dict(a )
        if "height" not in size or "width" not in size:
            raise ValueError(F"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
        return center_crop(a , size=(size["height"], size["width"]) , data_format=a , **a )

    def __UpperCamelCase ( self : List[Any] , a : np.ndarray , a : Union[int, float] , a : bool = True , a : Optional[Union[str, ChannelDimension]] = None , **a : Tuple , ) -> Optional[Any]:
        """Rescale pixel values by `scale`; with `offset` the result is shifted
        to be roughly symmetric around zero (e.g. [0,255] -> [-1,1])."""
        SCREAMING_SNAKE_CASE : int = image.astype(np.floataa )
        if offset:
            SCREAMING_SNAKE_CASE : Union[str, Any] = image - (scale / 2)
        return rescale(a , scale=a , data_format=a , **a )

    def __UpperCamelCase ( self : int , a : np.ndarray , a : Union[float, List[float]] , a : Union[float, List[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : List[str] , ) -> np.ndarray:
        """Channel-wise normalize one frame: (image - mean) / std."""
        return normalize(a , mean=a , std=a , data_format=a , **a )

    def __UpperCamelCase ( self : Tuple , a : ImageInput , a : bool = None , a : Dict[str, int] = None , a : PILImageResampling = None , a : bool = None , a : Dict[str, int] = None , a : bool = None , a : float = None , a : bool = None , a : bool = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
        """Preprocess a single frame through the enabled pipeline stages.

        NOTE(review): `do_resize and size is None or resample is None` parses
        as `(do_resize and size is None) or (resample is None)` — this raises
        whenever `resample` is None even when `do_resize` is False; likely
        needs parentheses. Preserved verbatim.
        """
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True." )
        # All transformations expect numpy arrays.
        SCREAMING_SNAKE_CASE : List[str] = to_numpy_array(a )
        if do_resize:
            SCREAMING_SNAKE_CASE : Optional[Any] = self.resize(image=a , size=a , resample=a )
        if do_center_crop:
            SCREAMING_SNAKE_CASE : Union[str, Any] = self.center_crop(a , size=a )
        if do_rescale:
            SCREAMING_SNAKE_CASE : Any = self.rescale(image=a , scale=a , offset=a )
        if do_normalize:
            SCREAMING_SNAKE_CASE : Tuple = self.normalize(image=a , mean=a , std=a )
        SCREAMING_SNAKE_CASE : Optional[int] = to_channel_dimension_format(a , a )
        return image

    def __UpperCamelCase ( self : Dict , a : ImageInput , a : bool = None , a : Dict[str, int] = None , a : PILImageResampling = None , a : bool = None , a : Dict[str, int] = None , a : bool = None , a : float = None , a : bool = None , a : bool = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[str, TensorType]] = None , a : ChannelDimension = ChannelDimension.FIRST , **a : Tuple , ) -> PIL.Image.Image:
        """Public entry point: batch the input videos, preprocess every frame,
        and return a BatchFeature with "pixel_values"."""
        # Per-call overrides fall back to the instance defaults.
        SCREAMING_SNAKE_CASE : str = do_resize if do_resize is not None else self.do_resize
        SCREAMING_SNAKE_CASE : Union[str, Any] = resample if resample is not None else self.resample
        SCREAMING_SNAKE_CASE : int = do_center_crop if do_center_crop is not None else self.do_center_crop
        SCREAMING_SNAKE_CASE : str = do_rescale if do_rescale is not None else self.do_rescale
        SCREAMING_SNAKE_CASE : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
        SCREAMING_SNAKE_CASE : Optional[Any] = offset if offset is not None else self.offset
        SCREAMING_SNAKE_CASE : str = do_normalize if do_normalize is not None else self.do_normalize
        SCREAMING_SNAKE_CASE : Optional[int] = image_mean if image_mean is not None else self.image_mean
        SCREAMING_SNAKE_CASE : Optional[Any] = image_std if image_std is not None else self.image_std
        SCREAMING_SNAKE_CASE : int = size if size is not None else self.size
        SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(a , default_to_square=a )
        SCREAMING_SNAKE_CASE : Tuple = crop_size if crop_size is not None else self.crop_size
        SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(a , param_name="crop_size" )
        if not valid_images(a ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        # Normalize input nesting to list-of-videos-of-frames.
        SCREAMING_SNAKE_CASE : Optional[int] = make_batched(a )
        SCREAMING_SNAKE_CASE : List[Any] = [
            [
                self._preprocess_image(
                    image=a , do_resize=a , size=a , resample=a , do_center_crop=a , crop_size=a , do_rescale=a , rescale_factor=a , offset=a , do_normalize=a , image_mean=a , image_std=a , data_format=a , )
                for img in video
            ]
            for video in videos
        ]
        SCREAMING_SNAKE_CASE : Optional[int] = {"pixel_values": videos}
        return BatchFeature(data=a , tensor_type=a )
76
1
# Processor that bundles FLAVA's image processor and BERT tokenizer behind a
# single __call__ accepting text and/or images.
# NOTE(review): identifier mangling — the three class attributes below all
# share the name `lowerCamelCase__` (only the last binding survives; upstream
# they are `attributes`, `image_processor_class`, `tokenizer_class`), methods
# share `__UpperCamelCase`, and duplicate `a` parameters are a SyntaxError.
# Tokens preserved verbatim.
import warnings
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class _UpperCamelCase ( __A ):
    '''Joint text+image processor for FLAVA: tokenizes text with a BERT
    tokenizer and preprocesses images with a FlavaImageProcessor.'''

    lowerCamelCase__ =['image_processor', 'tokenizer']
    lowerCamelCase__ ='FlavaImageProcessor'
    lowerCamelCase__ =('BertTokenizer', 'BertTokenizerFast')

    def __init__( self : Dict , a : List[Any]=None , a : List[Any]=None , **a : Tuple ) -> str:
        """Wire up the (image_processor, tokenizer) pair, honoring the
        deprecated `feature_extractor` kwarg as an alias for the former."""
        SCREAMING_SNAKE_CASE : Tuple = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , a , )
            SCREAMING_SNAKE_CASE : Any = kwargs.pop("feature_extractor" )
        SCREAMING_SNAKE_CASE : Any = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(a , a )
        # Upstream: self.current_processor = self.image_processor
        SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor

    def __call__( self : Tuple , a : Optional[ImageInput] = None , a : Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , a : bool = True , a : Union[bool, str, PaddingStrategy] = False , a : Union[bool, str, TruncationStrategy] = False , a : Optional[int] = None , a : int = 0 , a : Optional[int] = None , a : Optional[bool] = None , a : Optional[bool] = None , a : Optional[bool] = None , a : Optional[bool] = None , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = True , a : Optional[Union[str, TensorType]] = None , **a : Optional[Any] , ) -> Optional[int]:
        """Process text and/or images; when both are given, merge the image
        features into the text encoding. At least one of the two is required."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none." )
        if text is not None:
            SCREAMING_SNAKE_CASE : List[str] = self.tokenizer(
                text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_token_type_ids=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_length=a , verbose=a , return_tensors=a , **a , )
        if images is not None:
            SCREAMING_SNAKE_CASE : Dict = self.image_processor(
                a , return_image_mask=a , return_codebook_pixels=a , return_tensors=a , **a , )
        if text is not None and images is not None:
            # Merge image tensors into the text encoding and return it.
            encoding.update(a )
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**a ) , tensor_type=a )

    def __UpperCamelCase ( self : Any , *a : Union[str, Any] , **a : Optional[int] ) -> Optional[int]:
        """Delegate to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*a , **a )

    def __UpperCamelCase ( self : str , *a : Dict , **a : str ) -> List[str]:
        """Delegate to the tokenizer's decode."""
        return self.tokenizer.decode(*a , **a )

    @property
    def __UpperCamelCase ( self : str ) -> Tuple:
        """Union of tokenizer and image-processor input names, order-preserving."""
        SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer.model_input_names
        SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def __UpperCamelCase ( self : Dict ) -> Optional[Any]:
        """Deprecated alias for `image_processor_class`."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , a , )
        return self.image_processor_class

    @property
    def __UpperCamelCase ( self : Dict ) -> Any:
        """Deprecated alias for `image_processor`."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , a , )
        return self.image_processor
76
# Fast (tokenizers-backed) tokenizer for ConvBERT — a BERT-style WordPiece
# tokenizer with ConvBERT checkpoints' vocab files and defaults.
# NOTE(review): identifier mangling — the module constants below are all bound
# to the same name `a_`, yet the class attributes reference the original
# names (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP, ...); class attributes
# all share the name `lowerCamelCase__` (only the last binding survives).
# Tokens preserved verbatim.
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer

# Module logger (original name presumably `logger`).
a_ = logging.get_logger(__name__)

# Vocab file naming convention (original name presumably VOCAB_FILES_NAMES).
a_ = {'vocab_file': 'vocab.txt'}

# Checkpoint -> hosted vocab file URL (PRETRAINED_VOCAB_FILES_MAP).
a_ = {
    'vocab_file': {
        'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
        'YituTech/conv-bert-medium-small': (
            'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
        ),
        'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
    }
}

# Max input lengths per checkpoint (PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES).
a_ = {
    'YituTech/conv-bert-base': 512,
    'YituTech/conv-bert-medium-small': 512,
    'YituTech/conv-bert-small': 512,
}

# Per-checkpoint init kwargs (PRETRAINED_INIT_CONFIGURATION).
a_ = {
    'YituTech/conv-bert-base': {'do_lower_case': True},
    'YituTech/conv-bert-medium-small': {'do_lower_case': True},
    'YituTech/conv-bert-small': {'do_lower_case': True},
}


class _UpperCamelCase ( __A ):
    '''Fast ConvBERT tokenizer backed by HuggingFace `tokenizers`,
    mirroring the slow `ConvBertTokenizer`.'''

    lowerCamelCase__ =VOCAB_FILES_NAMES
    lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP
    lowerCamelCase__ =PRETRAINED_INIT_CONFIGURATION
    lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCamelCase__ =ConvBertTokenizer

    def __init__( self : List[str] , a : Union[str, Any]=None , a : Optional[int]=None , a : int=True , a : Tuple="[UNK]" , a : Dict="[SEP]" , a : Dict="[PAD]" , a : List[Any]="[CLS]" , a : Tuple="[MASK]" , a : Dict=True , a : Optional[Any]=None , **a : str , ) -> Dict:
        """Build the fast tokenizer and re-sync the backend normalizer when the
        requested lowercase / strip-accents / chinese-chars options differ from
        the ones serialized in the tokenizer file."""
        super().__init__(
            a , tokenizer_file=a , do_lower_case=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , tokenize_chinese_chars=a , strip_accents=a , **a , )
        SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , a ) != do_lower_case
            or normalizer_state.get("strip_accents" , a ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , a ) != tokenize_chinese_chars
        ):
            # Rebuild the normalizer with the caller's options.
            SCREAMING_SNAKE_CASE : List[str] = getattr(a , normalizer_state.pop("type" ) )
            SCREAMING_SNAKE_CASE : Optional[Any] = do_lower_case
            SCREAMING_SNAKE_CASE : Any = strip_accents
            SCREAMING_SNAKE_CASE : Optional[int] = tokenize_chinese_chars
            SCREAMING_SNAKE_CASE : List[str] = normalizer_class(**a )
        SCREAMING_SNAKE_CASE : str = do_lower_case

    def __UpperCamelCase ( self : Union[str, Any] , a : List[Any] , a : int=None ) -> Optional[Any]:
        """Build model inputs with special tokens:
        [CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        SCREAMING_SNAKE_CASE : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output

    def __UpperCamelCase ( self : Dict , a : List[int] , a : Optional[List[int]] = None ) -> List[int]:
        """Token-type ids: 0 for the first segment (incl. [CLS]/[SEP]),
        1 for the second segment."""
        SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id]
        SCREAMING_SNAKE_CASE : Union[str, Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def __UpperCamelCase ( self : Tuple , a : str , a : Optional[str] = None ) -> Tuple[str]:
        """Save the backend vocabulary files to `save_directory`."""
        SCREAMING_SNAKE_CASE : Dict = self._tokenizer.model.save(a , name=a )
        return tuple(a )
76
1
"""Square-matrix rotation utilities (90/180/270 degrees counterclockwise)
built from two primitives: transpose and row/column reversal.

Bug fix: every function here had been mangled to the single name
`lowerCamelCase__` (so each definition shadowed the previous one) while the
demo block called the undefined names `make_matrix`, `print_matrix`,
`rotate_aa` and `rotate_aaa` — distinct names are restored.
"""
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    """Build a row_size x row_size matrix filled 1..row_size**2 row-major.

    Negative sizes are taken by absolute value; zero falls back to 4.
    """
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate 90 degrees counterclockwise."""
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate 180 degrees."""
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate 270 degrees counterclockwise (i.e. 90 clockwise)."""
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    """Return the transpose (rows become columns)."""
    return [list(row) for row in zip(*matrix)]


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    """Reverse the order of the rows."""
    return matrix[::-1]


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    """Reverse each row in place-order (mirror horizontally)."""
    return [row[::-1] for row in matrix]


def print_matrix(matrix: list[list[int]]) -> None:
    """Print one row per line, space-separated."""
    for row in matrix:
        print(*row)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
76
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# (Bug fix: this path was bound to a throwaway name while sys.path.insert read
# the undefined `git_repo_path`.)
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    """Pytest hook: register diffusers' shared CLI options (e.g. --make-reports).

    Bug fix: both hooks below had been renamed to the same mangled identifier;
    pytest discovers hooks strictly by name, so neither ever ran.
    """
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """Pytest hook: emit per-run report files when --make-reports=<id> is set."""
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
76
1
# Whisper-style audio feature extractor: converts raw mono waveforms into
# log-mel spectrogram "input_features" (80 mel bins, 30 s windows at 16 kHz).
# NOTE(review): identifier mangling — assignments that upstream target
# `self.<attr>` or named locals are rebindings of `SCREAMING_SNAKE_CASE`,
# methods share the name `__UpperCamelCase`, duplicate `a` parameters are a
# SyntaxError, the base class `__A` is undefined, and the logger is bound to
# `a_` while the code reads `logger`. Tokens preserved verbatim.
import copy
from typing import Any, Dict, List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging

# Module logger (original name presumably `logger`).
a_ = logging.get_logger(__name__)


class _UpperCamelCase ( __A ):
    '''Feature extractor producing Whisper log-mel input features.'''

    # Name of the tensor this extractor produces.
    lowerCamelCase__ =['input_features']

    def __init__( self : Optional[Any] , a : str=80 , a : Union[str, Any]=1_6000 , a : List[str]=160 , a : Any=30 , a : Dict=400 , a : Optional[int]=0.0 , a : Union[str, Any]=False , **a : Optional[int] , ) -> List[str]:
        """Store STFT/mel configuration and precompute the Slaney-scaled mel
        filter bank (defaults: 80 mels, 16 kHz, hop 160, 30 s chunks,
        n_fft 400)."""
        super().__init__(
            feature_size=a , sampling_rate=a , padding_value=a , return_attention_mask=a , **a , )
        SCREAMING_SNAKE_CASE : Tuple = n_fft
        SCREAMING_SNAKE_CASE : Tuple = hop_length
        SCREAMING_SNAKE_CASE : Union[str, Any] = chunk_length
        # Samples per chunk and resulting number of frames.
        SCREAMING_SNAKE_CASE : List[str] = chunk_length * sampling_rate
        SCREAMING_SNAKE_CASE : Union[str, Any] = self.n_samples // hop_length
        SCREAMING_SNAKE_CASE : Dict = sampling_rate
        SCREAMING_SNAKE_CASE : List[str] = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=a , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=a , norm="slaney" , mel_scale="slaney" , )

    def __UpperCamelCase ( self : Union[str, Any] , a : np.array ) -> np.ndarray:
        """Compute the log10 mel spectrogram of one waveform, clamp its dynamic
        range to 8 dB below the max, and rescale to roughly [-1, 1]."""
        SCREAMING_SNAKE_CASE : Dict = spectrogram(
            a , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , )
        # Drop the final (incomplete) frame.
        SCREAMING_SNAKE_CASE : str = log_spec[:, :-1]
        SCREAMING_SNAKE_CASE : Optional[Any] = np.maximum(a , log_spec.max() - 8.0 )
        SCREAMING_SNAKE_CASE : Optional[Any] = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def __UpperCamelCase ( a : List[np.ndarray] , a : List[np.ndarray] , a : float = 0.0 ) -> List[np.ndarray]:
        """Per-utterance zero-mean/unit-variance normalization; when an
        attention mask is given, statistics use only the unpadded prefix and
        the padded tail is overwritten with `padding_value`."""
        if attention_mask is not None:
            SCREAMING_SNAKE_CASE : str = np.array(a , np.intaa )
            SCREAMING_SNAKE_CASE : int = []
            for vector, length in zip(a , attention_mask.sum(-1 ) ):
                SCREAMING_SNAKE_CASE : int = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
                if length < normed_slice.shape[0]:
                    SCREAMING_SNAKE_CASE : List[str] = padding_value
                normed_input_values.append(a )
        else:
            SCREAMING_SNAKE_CASE : List[str] = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
        return normed_input_values

    def __call__( self : List[Any] , a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a : bool = True , a : Optional[int] = None , a : Optional[Union[str, TensorType]] = None , a : Optional[bool] = None , a : Optional[str] = "max_length" , a : Optional[int] = None , a : Optional[int] = None , a : Optional[bool] = None , **a : Union[str, Any] , ) -> BatchFeature:
        """Featurize raw speech: validate sampling rate, batch/convert to
        float32 numpy, pad/truncate to 30 s, optionally normalize, then
        extract log-mel features per example."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    F" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    F" was sampled with {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        SCREAMING_SNAKE_CASE : int = isinstance(a , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
        SCREAMING_SNAKE_CASE : int = is_batched_numpy or (
            isinstance(a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
        elif not is_batched and not isinstance(a , np.ndarray ):
            SCREAMING_SNAKE_CASE : Dict = np.asarray(a , dtype=np.floataa )
        elif isinstance(a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            SCREAMING_SNAKE_CASE : Optional[Any] = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            SCREAMING_SNAKE_CASE : Optional[Any] = [np.asarray([raw_speech] ).T]
        SCREAMING_SNAKE_CASE : Any = BatchFeature({"input_features": raw_speech} )
        # convert into correct format for padding
        SCREAMING_SNAKE_CASE : Tuple = self.pad(
            a , padding=a , max_length=max_length if max_length else self.n_samples , truncation=a , pad_to_multiple_of=a , return_attention_mask=return_attention_mask or do_normalize , )
        # zero-mean and unit-variance normalization
        if do_normalize:
            SCREAMING_SNAKE_CASE : Optional[int] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , )
            SCREAMING_SNAKE_CASE : Any = np.stack(padded_inputs["input_features"] , axis=0 )
        # make sure list is in array format
        SCREAMING_SNAKE_CASE : Dict = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 )
        SCREAMING_SNAKE_CASE : Tuple = [self._np_extract_fbank_features(a ) for waveform in input_features[0]]
        if isinstance(input_features[0] , a ):
            SCREAMING_SNAKE_CASE : int = [np.asarray(a , dtype=np.floataa ) for feature in input_features]
        else:
            SCREAMING_SNAKE_CASE : Union[str, Any] = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            SCREAMING_SNAKE_CASE : Tuple = padded_inputs["attention_mask"][:, :: self.hop_length]
        if return_tensors is not None:
            SCREAMING_SNAKE_CASE : Optional[int] = padded_inputs.convert_to_tensors(a )
        return padded_inputs

    def __UpperCamelCase ( self : int ) -> Dict[str, Any]:
        """Serialize config to a dict, omitting the large precomputed
        `mel_filters` array."""
        SCREAMING_SNAKE_CASE : Optional[int] = copy.deepcopy(self.__dict__ )
        SCREAMING_SNAKE_CASE : Union[str, Any] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
76
# Unit tests for an image processor (EfficientFormer-style, reusing
# ViTImageProcessor): checks properties and PIL / numpy / torch input paths.
# NOTE(review): identifier mangling — both classes are named `_UpperCamelCase`
# (upstream: a *Tester helper and the actual *Test case), the test case
# instantiates the undefined `EfficientFormerImageProcessorTester`, the mixin
# base `__A` is undefined, and duplicate `a` parameters are a SyntaxError.
# Tokens preserved verbatim.
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class _UpperCamelCase ( unittest.TestCase ):
    '''Helper that holds image-processor test configuration and produces the
    kwargs dict used to construct the processor under test.'''

    def __init__( self : Tuple , a : int , a : Optional[int]=13 , a : Optional[int]=3 , a : int=224 , a : Optional[int]=30 , a : int=400 , a : Union[str, Any]=True , a : int=None , a : Tuple=True , a : Tuple=[0.5, 0.5, 0.5] , a : Optional[int]=[0.5, 0.5, 0.5] , ) -> Optional[Any]:
        """Record test fixture parameters (batch size, channels, resolutions,
        resize/normalize flags and statistics)."""
        SCREAMING_SNAKE_CASE : str = size if size is not None else {"height": 18, "width": 18}
        SCREAMING_SNAKE_CASE : Union[str, Any] = parent
        SCREAMING_SNAKE_CASE : int = batch_size
        SCREAMING_SNAKE_CASE : int = num_channels
        SCREAMING_SNAKE_CASE : Any = image_size
        SCREAMING_SNAKE_CASE : Tuple = min_resolution
        SCREAMING_SNAKE_CASE : str = max_resolution
        SCREAMING_SNAKE_CASE : int = do_resize
        SCREAMING_SNAKE_CASE : List[Any] = size
        SCREAMING_SNAKE_CASE : int = do_normalize
        SCREAMING_SNAKE_CASE : Tuple = image_mean
        SCREAMING_SNAKE_CASE : Tuple = image_std

    def __UpperCamelCase ( self : Any ) -> Optional[int]:
        """Return kwargs for constructing the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class _UpperCamelCase ( __A , unittest.TestCase ):
    '''Image-processor test case: property presence plus PIL, numpy and torch
    input handling with expected output shapes.'''

    lowerCamelCase__ =ViTImageProcessor if is_vision_available() else None

    def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
        """Set up the configuration helper for each test."""
        SCREAMING_SNAKE_CASE : List[Any] = EfficientFormerImageProcessorTester(self )

    @property
    def __UpperCamelCase ( self : Any ) -> List[str]:
        """Kwargs dict used to instantiate the processor under test."""
        return self.image_proc_tester.prepare_image_processor_dict()

    def __UpperCamelCase ( self : List[Any] ) -> Dict:
        """The processor exposes the expected configuration attributes."""
        SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(a , "image_mean" ) )
        self.assertTrue(hasattr(a , "image_std" ) )
        self.assertTrue(hasattr(a , "do_normalize" ) )
        self.assertTrue(hasattr(a , "do_resize" ) )
        self.assertTrue(hasattr(a , "size" ) )

    def __UpperCamelCase ( self : int ) -> str:
        """Placeholder (upstream: batched-dict test intentionally skipped)."""
        pass

    def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
        """PIL input path: single image and batch produce NCHW tensors with
        the configured crop size."""
        SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=a )
        for image in image_inputs:
            self.assertIsInstance(a , Image.Image )
        # Test not batched input
        SCREAMING_SNAKE_CASE : List[str] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE : str = image_processor(a , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )

    def __UpperCamelCase ( self : List[str] ) -> Any:
        """numpy input path: same shape expectations as the PIL path."""
        SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_proc_tester , equal_resolution=a , numpify=a )
        for image in image_inputs:
            self.assertIsInstance(a , np.ndarray )
        # Test not batched input
        SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE : Any = image_processor(a , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )

    def __UpperCamelCase ( self : List[str] ) -> List[str]:
        """torch input path: same shape expectations as the PIL path."""
        SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=a , torchify=a )
        for image in image_inputs:
            self.assertIsInstance(a , torch.Tensor )
        # Test not batched input
        SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(a , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )
76
1
def lowerCamelCase__(input_string: str) -> str:
    """Return the longest palindromic substring of *input_string*.

    Implements Manacher's algorithm: the input is interleaved with '|'
    separators ("aba" -> "a|b|a") so even- and odd-length palindromes share a
    single center representation, then the palindrome length at each center is
    computed in overall O(n) time by reusing information from the mirrored
    center inside the right-most palindrome found so far.

    The mangled source reused one variable name for every local, which left
    most references unbound and crashed on the empty string; this version
    restores distinct locals and handles "" explicitly.

    >>> lowerCamelCase__("abacaba")
    'abacaba'
    >>> lowerCamelCase__("")
    ''
    """
    if not input_string:
        return ""

    # "aba" -> "a|b|a": every palindrome in the augmented string has odd length.
    augmented = "|".join(input_string)
    n = len(augmented)

    # length[i] = length of the longest palindrome centered at i (augmented coords).
    length = [1] * n
    # [left, right] delimits the right-most palindrome discovered so far.
    left = right = 0

    max_length = 0  # best palindrome length seen (augmented coords)
    center = 0  # its center index

    for j in range(n):
        # Inside [left, right] we may start from the mirrored center's radius
        # (capped by the distance to the right boundary); outside, start at 1.
        k = 1 if j > right else min(length[left + right - j] // 2, right - j + 1)
        # Expand around j while the flanking characters keep matching.
        while j - k >= 0 and j + k < n and augmented[j + k] == augmented[j - k]:
            k += 1
        length[j] = 2 * k - 1
        # If this palindrome reaches past the previous right-most end, adopt it.
        if j + k - 1 > right:
            left = j - k + 1
            right = j + k - 1
        # Strict '>' keeps the first maximal center on ties, like the original.
        if length[j] > max_length:
            max_length = length[j]
            center = j

    # Slice the winning span and drop the '|' separators.
    span = augmented[center - max_length // 2 : center + max_length // 2 + 1]
    return "".join(ch for ch in span if ch != "|")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
76
import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset

from transformers import AutoTokenizer, HfArgumentParser


def tokenize(example):
    """Tokenize one dataset row for `Dataset.map`.

    Returns a dict with the token ids of ``example["content"]`` plus the
    characters-per-token ratio of the row (a cheap data-quality signal).
    Relies on the module-level ``tokenizer`` created below before ``ds.map``
    runs.
    """
    output = {}
    # NOTE(review): the original `truncation` literal was lost in the mangled
    # source; no truncation is assumed here — confirm against the upstream
    # pre-tokenization script.
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    # NOTE(review): the output key name was lost in the mangled source;
    # "ratio_char_token" follows the codeparrot convention — confirm.
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    # Default to one worker per CPU core.
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
# Drop the metadata columns so only the tokenization output is kept.
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
76
1
import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): a_ = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right a_ = 12_8022 a_ = 12_8028 @require_sentencepiece class _UpperCamelCase ( __A , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =MaMaaaTokenizer lowerCamelCase__ =False lowerCamelCase__ =False lowerCamelCase__ =True def __UpperCamelCase ( self : Optional[Any] ) -> str: """simple docstring""" super().setUp() SCREAMING_SNAKE_CASE : int = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"] SCREAMING_SNAKE_CASE : str = dict(zip(a , range(len(a ) ) ) ) SCREAMING_SNAKE_CASE : Union[str, Any] = Path(self.tmpdirname ) save_json(a , save_dir / VOCAB_FILES_NAMES["vocab_file"] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(a , save_dir / VOCAB_FILES_NAMES["spm_file"] ) SCREAMING_SNAKE_CASE : List[str] = MaMaaaTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def __UpperCamelCase ( self : str , **a : Dict ) -> List[Any]: """simple docstring""" return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **a ) def __UpperCamelCase ( self : List[str] , a : List[str] ) -> Any: """simple docstring""" return ( "This is a test", "This is a test", ) def __UpperCamelCase ( self : List[Any] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = "</s>" SCREAMING_SNAKE_CASE : 
List[str] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a ) def __UpperCamelCase ( self : int ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer() SCREAMING_SNAKE_CASE : List[Any] = list(tokenizer.get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "</s>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "<s>" ) self.assertEqual(len(a ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) ) @unittest.skip("Skip this test while all models are still to be uploaded." ) def __UpperCamelCase ( self : Optional[int] ) -> Dict: """simple docstring""" pass def __UpperCamelCase ( self : int ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE : Tuple = tokenizer.tokenize("This is a test" ) self.assertListEqual(a , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(a ) , [2, 3, 4, 5, 6] , ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] ) self.assertListEqual(a , ["▁This", "▁is", "▁a", "▁t", "est"] ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_string(a ) self.assertEqual(a , "This is a test" ) @slow def __UpperCamelCase ( self : Union[str, Any] ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = {"input_ids": [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 
28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # 
fmt: on self.tokenizer_integration_test_util( expected_encoding=a , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , ) @require_torch @require_sentencepiece @require_tokenizers class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' lowerCamelCase__ ='facebook/m2m100_418M' lowerCamelCase__ =[ 'In my opinion, there are two levels of response from the French government.', 'NSA Affair Emphasizes Complete Lack of Debate on Intelligence', ] lowerCamelCase__ =[ 'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.', 'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement', ] # fmt: off lowerCamelCase__ =[EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2] @classmethod def __UpperCamelCase ( cls : Tuple ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang="en" , tgt_lang="fr" ) SCREAMING_SNAKE_CASE : Union[str, Any] = 1 return cls def __UpperCamelCase ( self : Any ) -> Dict: """simple docstring""" self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 12_8006 ) self.assertEqual(self.tokenizer.get_lang_id("en" ) , 12_8022 ) self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 12_8076 ) self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 12_8063 ) def __UpperCamelCase ( self : Dict ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE : Any = self.tokenizer.get_vocab() self.assertEqual(len(a ) , self.tokenizer.vocab_size ) self.assertEqual(vocab["<unk>"] , 3 ) self.assertIn(self.tokenizer.get_lang_token("en" ) , a ) def __UpperCamelCase ( self : Dict ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE : Any = "en" SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , a ) def __UpperCamelCase ( self : str ) -> List[Any]: """simple docstring""" 
self.assertIn(a , self.tokenizer.all_special_ids ) # fmt: off SCREAMING_SNAKE_CASE : Union[str, Any] = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 1_4028, 136, 3286, 9706, 6, 9_0797, 6, 14_4012, 162, 8_8128, 3_0061, 5, 2] # fmt: on SCREAMING_SNAKE_CASE : str = self.tokenizer.decode(a , skip_special_tokens=a ) SCREAMING_SNAKE_CASE : int = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=a ) self.assertEqual(a , a ) self.assertNotIn(self.tokenizer.eos_token , a ) def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : Dict = self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(a ) SCREAMING_SNAKE_CASE : int = MaMaaaTokenizer.from_pretrained(a ) self.assertDictEqual(new_tok.lang_token_to_id , a ) @require_torch def __UpperCamelCase ( self : List[str] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = "en" SCREAMING_SNAKE_CASE : List[Any] = "fr" SCREAMING_SNAKE_CASE : str = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=a , return_tensors="pt" ) SCREAMING_SNAKE_CASE : int = shift_tokens_right( batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id ) for k in batch: SCREAMING_SNAKE_CASE : List[str] = batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def __UpperCamelCase ( self : List[Any] ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE : Any = "mr" self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) 
SCREAMING_SNAKE_CASE : Any = "zh" self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) @require_torch def __UpperCamelCase ( self : List[Any] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = "mr" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) SCREAMING_SNAKE_CASE : int = "zh" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) @require_torch def __UpperCamelCase ( self : Dict ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" ) self.assertEqual( nested_simplify(a ) , { # en_XX, A, test, EOS "input_ids": [[12_8022, 58, 4183, 2]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 12_8006, } , )
76
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig from transformers.utils import logging logging.set_verbosity_info() a_ = logging.get_logger(__name__) def lowerCamelCase__ ( _a): # initialize config if "resnet-50" in model_name: SCREAMING_SNAKE_CASE : int = ResNetConfig.from_pretrained("microsoft/resnet-50") elif "resnet-101" in model_name: SCREAMING_SNAKE_CASE : int = ResNetConfig.from_pretrained("microsoft/resnet-101") else: raise ValueError("Model name should include either resnet50 or resnet101") SCREAMING_SNAKE_CASE : str = DetrConfig(use_timm_backbone=_a , backbone_config=_a) # set label attributes SCREAMING_SNAKE_CASE : List[str] = "panoptic" in model_name if is_panoptic: SCREAMING_SNAKE_CASE : Union[str, Any] = 250 else: SCREAMING_SNAKE_CASE : Union[str, Any] = 91 SCREAMING_SNAKE_CASE : str = "huggingface/label-files" SCREAMING_SNAKE_CASE : Union[str, Any] = "coco-detection-id2label.json" SCREAMING_SNAKE_CASE : Optional[Any] = json.load(open(hf_hub_download(_a , _a , repo_type="dataset") , "r")) SCREAMING_SNAKE_CASE : int = {int(_a): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : List[Any] = idalabel SCREAMING_SNAKE_CASE : List[Any] = {v: k for k, v in idalabel.items()} return config, is_panoptic def lowerCamelCase__ ( _a): # here we list all keys to be renamed (original name on the left, our name on the right) SCREAMING_SNAKE_CASE : Union[str, Any] = [] # stem # fmt: off rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight")) rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight")) rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias")) 
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean")) rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var")) # stages for stage_idx in range(len(config.backbone_config.depths)): for layer_idx in range(config.backbone_config.depths[stage_idx]): # shortcut if layer_idx == 0: rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight", )) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight", )) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias", )) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean", )) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var", )) # 3 convs for i in range(3): rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight", )) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight", )) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias", 
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias", )) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean", )) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var", )) # fmt: on for i in range(config.encoder_layers): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( ( f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight", )) rename_keys.append( (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias")) rename_keys.append( (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")) rename_keys.append( (f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias")) rename_keys.append( (f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias")) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( ( f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", 
f"decoder.layers.{i}.self_attn.out_proj.weight", )) rename_keys.append( (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")) rename_keys.append( ( f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight", f"decoder.layers.{i}.encoder_attn.out_proj.weight", )) rename_keys.append( ( f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias", f"decoder.layers.{i}.encoder_attn.out_proj.bias", )) rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")) rename_keys.append( (f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")) rename_keys.append( (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias")) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("input_proj.weight", "input_projection.weight"), ("input_proj.bias", "input_projection.bias"), ("query_embed.weight", "query_position_embeddings.weight"), ("transformer.decoder.norm.weight", "decoder.layernorm.weight"), ("transformer.decoder.norm.bias", "decoder.layernorm.bias"), 
("class_embed.weight", "class_labels_classifier.weight"), ("class_embed.bias", "class_labels_classifier.bias"), ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"), ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"), ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"), ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"), ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"), ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"), ]) return rename_keys def lowerCamelCase__ ( _a , _a , _a): SCREAMING_SNAKE_CASE : str = state_dict.pop(_a) SCREAMING_SNAKE_CASE : int = val def lowerCamelCase__ ( _a , _a=False): SCREAMING_SNAKE_CASE : Optional[Any] = "" if is_panoptic: SCREAMING_SNAKE_CASE : Optional[int] = "detr." # first: transformer encoder for i in range(6): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) SCREAMING_SNAKE_CASE : List[str] = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight") SCREAMING_SNAKE_CASE : Optional[int] = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias") # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight[:256, :] SCREAMING_SNAKE_CASE : int = in_proj_bias[:256] SCREAMING_SNAKE_CASE : Tuple = in_proj_weight[256:512, :] SCREAMING_SNAKE_CASE : List[Any] = in_proj_bias[256:512] SCREAMING_SNAKE_CASE : str = in_proj_weight[-256:, :] SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6): # read in weights + bias of input projection layer of self-attention SCREAMING_SNAKE_CASE : List[str] = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight") SCREAMING_SNAKE_CASE : str = 
state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias") # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight[:256, :] SCREAMING_SNAKE_CASE : Dict = in_proj_bias[:256] SCREAMING_SNAKE_CASE : List[Any] = in_proj_weight[256:512, :] SCREAMING_SNAKE_CASE : Any = in_proj_bias[256:512] SCREAMING_SNAKE_CASE : Optional[int] = in_proj_weight[-256:, :] SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention SCREAMING_SNAKE_CASE : Optional[Any] = state_dict.pop( f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight") SCREAMING_SNAKE_CASE : int = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias") # next, add query, keys and values (in that order) of cross-attention to the state dict SCREAMING_SNAKE_CASE : Tuple = in_proj_weight_cross_attn[:256, :] SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_bias_cross_attn[:256] SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_weight_cross_attn[256:512, :] SCREAMING_SNAKE_CASE : Dict = in_proj_bias_cross_attn[256:512] SCREAMING_SNAKE_CASE : Optional[int] = in_proj_weight_cross_attn[-256:, :] SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_bias_cross_attn[-256:] def lowerCamelCase__ ( ): SCREAMING_SNAKE_CASE : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg" SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open(requests.get(_a , stream=_a).raw) return im @torch.no_grad() def lowerCamelCase__ ( _a , _a=None , _a=False): SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[int] = get_detr_config(_a) # load original model from torch hub SCREAMING_SNAKE_CASE : Union[str, Any] = { "detr-resnet-50": "detr_resnet50", "detr-resnet-101": "detr_resnet101", } logger.info(f"Converting model {model_name}...") SCREAMING_SNAKE_CASE : Optional[int] = torch.hub.load("facebookresearch/detr" , 
model_name_to_original_name[model_name] , pretrained=_a).eval() SCREAMING_SNAKE_CASE : Tuple = detr.state_dict() # rename keys for src, dest in create_rename_keys(_a): if is_panoptic: SCREAMING_SNAKE_CASE : List[str] = "detr." + src rename_key(_a , _a , _a) # query, key and value matrices need special treatment read_in_q_k_v(_a , is_panoptic=_a) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them SCREAMING_SNAKE_CASE : List[Any] = "detr.model." if is_panoptic else "model." for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("detr") and not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor") ): SCREAMING_SNAKE_CASE : Optional[int] = state_dict.pop(_a) SCREAMING_SNAKE_CASE : Union[str, Any] = val elif "class_labels_classifier" in key or "bbox_predictor" in key: SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(_a) SCREAMING_SNAKE_CASE : Optional[int] = val elif key.startswith("bbox_attention") or key.startswith("mask_head"): continue else: SCREAMING_SNAKE_CASE : Optional[Any] = state_dict.pop(_a) SCREAMING_SNAKE_CASE : List[Any] = val else: if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"): SCREAMING_SNAKE_CASE : Any = state_dict.pop(_a) SCREAMING_SNAKE_CASE : Any = val # finally, create HuggingFace model and load state dict SCREAMING_SNAKE_CASE : int = DetrForSegmentation(_a) if is_panoptic else DetrForObjectDetection(_a) model.load_state_dict(_a) model.eval() # verify our conversion on an image SCREAMING_SNAKE_CASE : int = "coco_panoptic" if is_panoptic else "coco_detection" SCREAMING_SNAKE_CASE : Optional[int] = DetrImageProcessor(format=_a) SCREAMING_SNAKE_CASE : List[str] = processor(images=prepare_img() , return_tensors="pt") SCREAMING_SNAKE_CASE : Any = encoding["pixel_values"] SCREAMING_SNAKE_CASE : Optional[Any] = detr(_a) SCREAMING_SNAKE_CASE : Any = model(_a) assert 
torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-3) assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-3) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4) print("Looks ok!") if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...") Path(_a).mkdir(exist_ok=_a) model.save_pretrained(_a) processor.save_pretrained(_a) if push_to_hub: # Upload model and image processor to the hub logger.info("Uploading PyTorch model and image processor to the hub...") model.push_to_hub(f"nielsr/{model_name}") processor.push_to_hub(f"nielsr/{model_name}") if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument( '--model_name', default='detr-resnet-50', type=str, choices=['detr-resnet-50', 'detr-resnet-101'], help='Name of the DETR model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.') a_ = parser.parse_args() convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
76
1
from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the Euclidean distance between two equal-length vectors.

    >>> euclidean(np.array([0, 0]), np.array([3, 4]))
    5.0
    """
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    """Find, for every vector in *value_array*, its nearest neighbour in *dataset*.

    :param dataset: 2-D array of candidate vectors.
    :param value_array: 2-D array of query vectors (same width and dtype as
        *dataset*).
    :return: one ``[nearest_vector, distance]`` pair per query vector.
    :raises ValueError: when the dimensions or shapes of the inputs disagree.
    :raises TypeError: when the shapes cannot be compared or dtypes differ.
    """
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        # 1-D inputs have no shape[1]; mirror the original's fallback check.
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        # Seed the search with the first candidate, then keep the closest one.
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the cosine similarity of two vectors: dot(a, b) / (|a| * |b|)."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
76
import os def lowerCamelCase__ ( ): with open(os.path.dirname(_a) + "/p022_names.txt") as file: SCREAMING_SNAKE_CASE : List[str] = str(file.readlines()[0]) SCREAMING_SNAKE_CASE : List[Any] = names.replace("\"" , "").split(",") names.sort() SCREAMING_SNAKE_CASE : Dict = 0 SCREAMING_SNAKE_CASE : Dict = 0 for i, name in enumerate(_a): for letter in name: name_score += ord(_a) - 64 total_score += (i + 1) * name_score SCREAMING_SNAKE_CASE : str = 0 return total_score if __name__ == "__main__": print(solution())
76
1
import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder a_ = 'base_with_context' def lowerCamelCase__ ( _a , _a): SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"])) SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter( torch.FloatTensor(weights["Embed_0"]["embedding"]) , requires_grad=_a) for lyr_num, lyr in enumerate(model.encoders): SCREAMING_SNAKE_CASE : List[Any] = weights[f"layers_{lyr_num}"] SCREAMING_SNAKE_CASE : Any = nn.Parameter( torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])) SCREAMING_SNAKE_CASE : Tuple = ly_weight["attention"] SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T)) SCREAMING_SNAKE_CASE : int = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T)) SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T)) SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T)) SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"])) SCREAMING_SNAKE_CASE : int = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T)) SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T)) SCREAMING_SNAKE_CASE : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T)) SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"])) return model def lowerCamelCase__ ( _a , _a): SCREAMING_SNAKE_CASE : int = 
nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T)) SCREAMING_SNAKE_CASE : Any = nn.Parameter( torch.FloatTensor(weights["Embed_0"]["embedding"]) , requires_grad=_a) for lyr_num, lyr in enumerate(model.encoders): SCREAMING_SNAKE_CASE : Optional[Any] = weights[f"layers_{lyr_num}"] SCREAMING_SNAKE_CASE : Union[str, Any] = ly_weight["attention"] SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T)) SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T)) SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T)) SCREAMING_SNAKE_CASE : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T)) SCREAMING_SNAKE_CASE : int = nn.Parameter( torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])) SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T)) SCREAMING_SNAKE_CASE : int = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T)) SCREAMING_SNAKE_CASE : str = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T)) SCREAMING_SNAKE_CASE : str = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"])) SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"])) return model def lowerCamelCase__ ( _a , _a): SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T)) SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T)) SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter( torch.FloatTensor(weights["Embed_0"]["embedding"]) , requires_grad=_a) SCREAMING_SNAKE_CASE : Dict = nn.Parameter( torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T)) for lyr_num, lyr in enumerate(model.decoders): SCREAMING_SNAKE_CASE : Optional[int] = 
weights[f"layers_{lyr_num}"] SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter( torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"])) SCREAMING_SNAKE_CASE : Any = nn.Parameter( torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T)) SCREAMING_SNAKE_CASE : Optional[int] = ly_weight["self_attention"] SCREAMING_SNAKE_CASE : str = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T)) SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T)) SCREAMING_SNAKE_CASE : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T)) SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T)) SCREAMING_SNAKE_CASE : List[Any] = ly_weight["MultiHeadDotProductAttention_0"] SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T)) SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T)) SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T)) SCREAMING_SNAKE_CASE : str = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T)) SCREAMING_SNAKE_CASE : Dict = nn.Parameter( torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"])) SCREAMING_SNAKE_CASE : int = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"])) SCREAMING_SNAKE_CASE : str = nn.Parameter( torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T)) SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T)) SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T)) SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T)) SCREAMING_SNAKE_CASE : Any = 
nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"])) SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T)) return model def lowerCamelCase__ ( _a): SCREAMING_SNAKE_CASE : Any = checkpoints.load_tax_checkpoint(args.checkpoint_path) SCREAMING_SNAKE_CASE : Optional[int] = jnp.tree_util.tree_map(onp.array , _a) SCREAMING_SNAKE_CASE : Dict = [ "from __gin__ import dynamic_registration", "from music_spectrogram_diffusion.models.diffusion import diffusion_utils", "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0", "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()", ] SCREAMING_SNAKE_CASE : int = os.path.join(args.checkpoint_path , ".." , "config.gin") SCREAMING_SNAKE_CASE : List[str] = inference.parse_training_gin_file(_a , _a) SCREAMING_SNAKE_CASE : str = inference.InferenceModel(args.checkpoint_path , _a) SCREAMING_SNAKE_CASE : Any = DDPMScheduler(beta_schedule="squaredcos_cap_v2" , variance_type="fixed_large") SCREAMING_SNAKE_CASE : List[Any] = SpectrogramNotesEncoder( max_length=synth_model.sequence_length["inputs"] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , ) SCREAMING_SNAKE_CASE : Dict = SpectrogramContEncoder( input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["targets_context"] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , 
d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , ) SCREAMING_SNAKE_CASE : Union[str, Any] = TaFilmDecoder( input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["targets_context"] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , ) SCREAMING_SNAKE_CASE : Optional[int] = load_notes_encoder(ta_checkpoint["target"]["token_encoder"] , _a) SCREAMING_SNAKE_CASE : List[str] = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"] , _a) SCREAMING_SNAKE_CASE : Union[str, Any] = load_decoder(ta_checkpoint["target"]["decoder"] , _a) SCREAMING_SNAKE_CASE : Tuple = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder") SCREAMING_SNAKE_CASE : Dict = SpectrogramDiffusionPipeline( notes_encoder=_a , continuous_encoder=_a , decoder=_a , scheduler=_a , melgan=_a , ) if args.save: pipe.save_pretrained(args.output_path) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.') parser.add_argument( '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.' ) parser.add_argument( '--checkpoint_path', default=F'''{MODEL}/checkpoint_500000''', type=str, required=False, help='Path to the original jax model checkpoint.', ) a_ = parser.parse_args() main(args)
76
from collections.abc import Callable

import numpy as np


def explicit_euler(ode_func, y0, x0, x_end, step_size):
    """Integrate y' = ode_func(x, y), y(x0) = y0 with the explicit Euler method.

    Args:
        ode_func: right-hand side f(x, y) of the ODE.
        y0: initial value y(x0).
        x0: start of the integration interval.
        x_end: end of the integration interval.
        step_size: fixed step h > 0.

    Returns:
        numpy array of n + 1 successive approximations of y, where
        n = ceil((x_end - x0) / step_size).
    """
    # Fix: the original mangled signature reused one parameter name five
    # times, which is a SyntaxError in Python.
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # Forward step: y_{k+1} = y_k + h * f(x_k, y_k)
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
76
1
from timeit import timeit

test_data = {
    "MALAYALAM": True,
    "String": False,
    "rotor": True,
    "level": True,
    "A": True,
    "BB": True,
    "ABC": False,
    "amanaplanacanalpanama": True,  # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())


def is_palindrome_traversal(s):
    """Return True if s is a palindrome, using two inward-moving indices."""
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome(s):
    """Return True if s is a palindrome, comparing mirrored characters."""
    mid = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(mid))


def is_palindrome_recursive(s):
    """Return True if s is a palindrome, recursing on the inner substring."""
    # Fix: the base case must stop at length <= 1; accepting length 2
    # unconditionally wrongly classified strings like "ab" as palindromes.
    if len(s) <= 1:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s):
    """Return True if s is a palindrome, via slice reversal."""
    return s == s[::-1]


def benchmark_function(name):
    """Time the named checker against every entry in test_data and print the result."""
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")


if __name__ == "__main__":
    for key, value in test_data.items():
        assert is_palindrome(key) is is_palindrome_recursive(key)
        assert is_palindrome(key) is is_palindrome_slice(key)
        print(f"{key:21} {value}")
    print("a man a plan a canal panama")

    # finished 500,000 runs in 0.46793 seconds
    benchmark_function("is_palindrome_slice")
    # finished 500,000 runs in 0.85234 seconds
    benchmark_function("is_palindrome")
    # finished 500,000 runs in 1.32028 seconds
    benchmark_function("is_palindrome_recursive")
    # finished 500,000 runs in 2.08679 seconds
    benchmark_function("is_palindrome_traversal")
76
def or_gate(input_a, input_b):
    """OR gate: return 1 if at least one input is 1, else 0.

    Args:
        input_a: first binary input (0 or 1).
        input_b: second binary input (0 or 1).
    """
    # Fix: the mangled version reused one parameter name twice (SyntaxError)
    # and counted the first input twice, ignoring the second.
    return int((input_a, input_b).count(1) != 0)


def test_or_gate():
    """Sanity-check all four input combinations."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
76
1
class BoyerMooreSearch:
    """Boyer–Moore substring search using the bad-character heuristic."""

    def __init__(self, text: str, pattern: str) -> None:
        # Fix: the mangled version bound these to a throwaway local, so the
        # attributes the other methods read were never set.
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of `char` in the pattern, or -1."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch for the alignment
        starting at `current_pos`, or -1 if the pattern matches there."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> "list[int]":
        """Return every start position where the pattern occurs in the text."""
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
76
UNIVERSAL_GAS_CONSTANT = 8.314_4598  # J / (mol * K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Return the root-mean-square speed of gas molecules in m/s.

    vrms = sqrt(3 * R * T / M), with T in kelvin and M in kg/mol.

    Raises:
        Exception: if temperature is negative or molar_mass is not positive.
    """
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
76
1
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """Return True if some permutation of input_str is a palindrome.

    Counter-based: at most one character may have an odd frequency
    (spaces ignored, case-insensitive).
    """
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """Return True if some permutation of input_str is a palindrome (manual count)."""
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    # A palindrome permutation allows at most one character with an odd count.
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    """Print a timing comparison of both implementations on input_str."""
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
76
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> "list[str]":
    """Return one shortest path from start to goal as a list of nodes.

    Returns [] if no path exists.
    """
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the number of edges on a shortest start->target path, or -1."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
76
1
# Baconian cipher lookup table (this variant swaps the usual codes of
# j/v with u/w slots; decode_dict is its exact inverse, so round-trips hold).
encode_dict = {
    "a": "AAAAA",
    "b": "AAAAB",
    "c": "AAABA",
    "d": "AAABB",
    "e": "AABAA",
    "f": "AABAB",
    "g": "AABBA",
    "h": "AABBB",
    "i": "ABAAA",
    "j": "BBBAA",
    "k": "ABAAB",
    "l": "ABABA",
    "m": "ABABB",
    "n": "ABBAA",
    "o": "ABBAB",
    "p": "ABBBA",
    "q": "ABBBB",
    "r": "BAAAA",
    "s": "BAAAB",
    "t": "BAABA",
    "u": "BAABB",
    "v": "BBBAB",
    "w": "BABAA",
    "x": "BABAB",
    "y": "BABBA",
    "z": "BABBB",
    " ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    """Encode a word (letters and spaces only) with the Baconian cipher.

    Raises:
        Exception: on any character that is not a letter or a space.
    """
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    """Decode a Baconian-cipher string made up of 'A', 'B' and spaces.

    Raises:
        Exception: on any other character.
    """
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        # Consume the word five code letters at a time.
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
76
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import tensorflow as tf

    from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM


@require_tf
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase(unittest.TestCase):
    """Slow integration test: mT5-small loss on a fixed input/label pair (TF)."""

    @slow
    def __UpperCamelCase(self: str) -> List[str]:
        # NOTE(review): the local names below were machine-mangled to a single
        # identifier; later statements reference the intended originals
        # (model, tokenizer, mtf_score, EXPECTED_SCORE) which are never bound
        # here, so this test cannot run as written — restore the bindings
        # before enabling it.
        """simple docstring"""
        SCREAMING_SNAKE_CASE: Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small")
        SCREAMING_SNAKE_CASE: List[str] = AutoTokenizer.from_pretrained("google/mt5-small")
        SCREAMING_SNAKE_CASE: Tuple = tokenizer("Hello there", return_tensors="tf").input_ids
        SCREAMING_SNAKE_CASE: Optional[Any] = tokenizer("Hi I am", return_tensors="tf").input_ids
        SCREAMING_SNAKE_CASE: str = model(a, labels=a).loss
        # Mean negative log-likelihood, compared against a recorded reference.
        SCREAMING_SNAKE_CASE: Any = -tf.math.reduce_mean(a).numpy()
        SCREAMING_SNAKE_CASE: Union[str, Any] = -21.22_8168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
76
1
# Transformers-style lazy module init for the BARTpho tokenizer: the real
# submodule is imported only on first attribute access, and only when
# sentencepiece is installed.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


# Fix: the import structure must be a named dict that _LazyModule receives;
# the mangled version left `_import_structure` undefined (NameError).
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # sentencepiece missing: expose nothing; attribute access raises lazily.
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
76
from math import factorial


def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return P(X = successes) for X ~ Binomial(trials, prob).

    Args:
        successes: number of successful outcomes (non-negative int).
        trials: number of independent trials (non-negative int, >= successes).
        prob: success probability of a single trial, strictly in (0, 1).

    Raises:
        ValueError: on any violated precondition.
    """
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    # Fix: the mangled version called isinstance(x, x), testing a value
    # against itself as a class (TypeError at runtime).
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trails")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
76
1
from __future__ import annotations

import unittest

from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TF_MODEL_FOR_PRETRAINING_MAPPING,
        TFMobileBertForMaskedLM,
        TFMobileBertForMultipleChoice,
        TFMobileBertForNextSentencePrediction,
        TFMobileBertForPreTraining,
        TFMobileBertForQuestionAnswering,
        TFMobileBertForSequenceClassification,
        TFMobileBertForTokenClassification,
        TFMobileBertModel,
    )


# NOTE(review): this whole test module was machine-mangled — class/method
# names were collapsed to _UpperCamelCase/__UpperCamelCase and every local
# to SCREAMING_SNAKE_CASE, so many statements reference names that are
# never bound (config_and_inputs tuple unpacking, model/tokenizer locals,
# `a` placeholders). It is preserved byte-for-byte below; restore the
# original identifiers from the upstream transformers repository before
# running. Structure: a TFModelTesterMixin test class, an inner model
# tester that builds a tiny MobileBertConfig and checks output shapes for
# each task head, and a slow integration test pinning pretraining logits.
@require_tf
class _UpperCamelCase(__A, __A, unittest.TestCase):
    '''simple docstring'''

    lowerCamelCase__ = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    lowerCamelCase__ = (
        {
            'feature-extraction': TFMobileBertModel,
            'fill-mask': TFMobileBertForMaskedLM,
            'question-answering': TFMobileBertForQuestionAnswering,
            'text-classification': TFMobileBertForSequenceClassification,
            'token-classification': TFMobileBertForTokenClassification,
            'zero-shot': TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    lowerCamelCase__ = False
    lowerCamelCase__ = False

    def __UpperCamelCase(self: str, a: List[str], a: Union[str, Any], a: Dict = False) -> Dict:
        """simple docstring"""
        # Adds dummy next-sentence labels when the model class needs them.
        SCREAMING_SNAKE_CASE: List[Any] = super()._prepare_for_class(a, a, return_labels=a)
        if return_labels:
            if model_class in get_values(a):
                SCREAMING_SNAKE_CASE: List[str] = tf.zeros(self.model_tester.batch_size, dtype=tf.intaa)
        return inputs_dict

    class _UpperCamelCase(__A):
        '''simple docstring'''

        def __init__(self: Tuple, a: Optional[int], a: Any = 13, a: Optional[Any] = 7, a: List[Any] = True, a: int = True, a: Optional[int] = True, a: int = True, a: str = 99, a: List[Any] = 32, a: Union[str, Any] = 32, a: List[Any] = 2, a: List[Any] = 4, a: Optional[int] = 37, a: Optional[int] = "gelu", a: List[str] = 0.1, a: Optional[int] = 0.1, a: List[str] = 512, a: Any = 16, a: Tuple = 2, a: Optional[Any] = 0.02, a: Optional[Any] = 3, a: Optional[int] = 4, a: Union[str, Any] = None) -> Tuple:
            """simple docstring"""
            # Tiny hyper-parameters so each shape check runs in milliseconds.
            SCREAMING_SNAKE_CASE: str = parent
            SCREAMING_SNAKE_CASE: Union[str, Any] = batch_size
            SCREAMING_SNAKE_CASE: List[Any] = seq_length
            SCREAMING_SNAKE_CASE: Optional[int] = is_training
            SCREAMING_SNAKE_CASE: List[Any] = use_input_mask
            SCREAMING_SNAKE_CASE: Dict = use_token_type_ids
            SCREAMING_SNAKE_CASE: str = use_labels
            SCREAMING_SNAKE_CASE: Optional[Any] = vocab_size
            SCREAMING_SNAKE_CASE: int = hidden_size
            SCREAMING_SNAKE_CASE: Optional[int] = num_hidden_layers
            SCREAMING_SNAKE_CASE: List[str] = num_attention_heads
            SCREAMING_SNAKE_CASE: Optional[Any] = intermediate_size
            SCREAMING_SNAKE_CASE: Optional[Any] = hidden_act
            SCREAMING_SNAKE_CASE: Optional[Any] = hidden_dropout_prob
            SCREAMING_SNAKE_CASE: Optional[int] = attention_probs_dropout_prob
            SCREAMING_SNAKE_CASE: int = max_position_embeddings
            SCREAMING_SNAKE_CASE: str = type_vocab_size
            SCREAMING_SNAKE_CASE: Any = type_sequence_label_size
            SCREAMING_SNAKE_CASE: List[Any] = initializer_range
            SCREAMING_SNAKE_CASE: Optional[Any] = num_labels
            SCREAMING_SNAKE_CASE: str = num_choices
            SCREAMING_SNAKE_CASE: List[str] = scope
            SCREAMING_SNAKE_CASE: List[Any] = embedding_size

        def __UpperCamelCase(self: Union[str, Any]) -> Union[str, Any]:
            """simple docstring"""
            # Builds random ids/masks/labels plus a matching MobileBertConfig.
            SCREAMING_SNAKE_CASE: int = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
            SCREAMING_SNAKE_CASE: str = None
            if self.use_input_mask:
                SCREAMING_SNAKE_CASE: Optional[int] = random_attention_mask([self.batch_size, self.seq_length])
            SCREAMING_SNAKE_CASE: Any = None
            if self.use_token_type_ids:
                SCREAMING_SNAKE_CASE: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
            SCREAMING_SNAKE_CASE: List[str] = None
            SCREAMING_SNAKE_CASE: List[str] = None
            SCREAMING_SNAKE_CASE: Optional[int] = None
            if self.use_labels:
                SCREAMING_SNAKE_CASE: List[str] = ids_tensor([self.batch_size], self.type_sequence_label_size)
                SCREAMING_SNAKE_CASE: List[Any] = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                SCREAMING_SNAKE_CASE: List[Any] = ids_tensor([self.batch_size], self.num_choices)
            SCREAMING_SNAKE_CASE: Tuple = MobileBertConfig(
                vocab_size=self.vocab_size,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                hidden_act=self.hidden_act,
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                type_vocab_size=self.type_vocab_size,
                initializer_range=self.initializer_range,
                embedding_size=self.embedding_size,
            )
            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

        def __UpperCamelCase(self: List[str], a: Union[str, Any], a: List[str], a: Tuple, a: Any, a: Tuple, a: int, a: Dict) -> Tuple:
            """simple docstring"""
            # Base model: checks last_hidden_state and pooler_output shapes
            # for dict, list and tensor-only call signatures.
            SCREAMING_SNAKE_CASE: List[Any] = TFMobileBertModel(config=a)
            SCREAMING_SNAKE_CASE: str = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            SCREAMING_SNAKE_CASE: Dict = model(a)
            SCREAMING_SNAKE_CASE: Optional[Any] = [input_ids, input_mask]
            SCREAMING_SNAKE_CASE: Union[str, Any] = model(a)
            SCREAMING_SNAKE_CASE: Optional[int] = model(a)
            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
            )
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def __UpperCamelCase(self: int, a: str, a: Optional[Any], a: List[Any], a: List[Any], a: Any, a: int, a: List[Any]) -> int:
            """simple docstring"""
            # Masked-LM head: logits over the vocabulary per position.
            SCREAMING_SNAKE_CASE: List[Any] = TFMobileBertForMaskedLM(config=a)
            SCREAMING_SNAKE_CASE: Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            SCREAMING_SNAKE_CASE: Tuple = model(a)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def __UpperCamelCase(self: Optional[int], a: List[str], a: Any, a: Optional[Any], a: Optional[int], a: Tuple, a: List[str], a: str) -> int:
            """simple docstring"""
            # Next-sentence-prediction head: binary logits.
            SCREAMING_SNAKE_CASE: int = TFMobileBertForNextSentencePrediction(config=a)
            SCREAMING_SNAKE_CASE: Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            SCREAMING_SNAKE_CASE: Optional[Any] = model(a)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def __UpperCamelCase(self: Union[str, Any], a: Dict, a: str, a: List[str], a: Optional[int], a: Union[str, Any], a: Tuple, a: Tuple) -> Optional[Any]:
            """simple docstring"""
            # Pretraining heads: MLM logits plus seq-relationship logits.
            SCREAMING_SNAKE_CASE: Optional[Any] = TFMobileBertForPreTraining(config=a)
            SCREAMING_SNAKE_CASE: List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            SCREAMING_SNAKE_CASE: Optional[int] = model(a)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
            )
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def __UpperCamelCase(self: Union[str, Any], a: int, a: Dict, a: str, a: Union[str, Any], a: Union[str, Any], a: Optional[Any], a: Union[str, Any]) -> Optional[Any]:
            """simple docstring"""
            # Sequence-classification head: one logit per label.
            SCREAMING_SNAKE_CASE: List[Any] = self.num_labels
            SCREAMING_SNAKE_CASE: Dict = TFMobileBertForSequenceClassification(config=a)
            SCREAMING_SNAKE_CASE: List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            SCREAMING_SNAKE_CASE: List[Any] = model(a)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def __UpperCamelCase(self: Tuple, a: Optional[int], a: List[Any], a: Dict, a: Union[str, Any], a: Any, a: int, a: Tuple) -> Dict:
            """simple docstring"""
            # Multiple-choice head: inputs tiled across the choice dimension.
            SCREAMING_SNAKE_CASE: Optional[Any] = self.num_choices
            SCREAMING_SNAKE_CASE: Any = TFMobileBertForMultipleChoice(config=a)
            SCREAMING_SNAKE_CASE: List[str] = tf.tile(tf.expand_dims(a, 1), (1, self.num_choices, 1))
            SCREAMING_SNAKE_CASE: Optional[Any] = tf.tile(tf.expand_dims(a, 1), (1, self.num_choices, 1))
            SCREAMING_SNAKE_CASE: Optional[int] = tf.tile(tf.expand_dims(a, 1), (1, self.num_choices, 1))
            SCREAMING_SNAKE_CASE: str = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            SCREAMING_SNAKE_CASE: str = model(a)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def __UpperCamelCase(self: str, a: Any, a: Dict, a: str, a: Optional[Any], a: Dict, a: str, a: List[Any]) -> int:
            """simple docstring"""
            # Token-classification head: one logit set per token.
            SCREAMING_SNAKE_CASE: Tuple = self.num_labels
            SCREAMING_SNAKE_CASE: Tuple = TFMobileBertForTokenClassification(config=a)
            SCREAMING_SNAKE_CASE: Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            SCREAMING_SNAKE_CASE: str = model(a)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def __UpperCamelCase(self: int, a: Any, a: List[Any], a: int, a: List[str], a: Any, a: Dict, a: Tuple) -> Tuple:
            """simple docstring"""
            # Question-answering head: start and end logits per token.
            SCREAMING_SNAKE_CASE: Dict = TFMobileBertForQuestionAnswering(config=a)
            SCREAMING_SNAKE_CASE: int = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            SCREAMING_SNAKE_CASE: Optional[int] = model(a)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

        def __UpperCamelCase(self: Dict) -> Tuple:
            """simple docstring"""
            # Repackages prepare_config_and_inputs output as the kwargs dict
            # the common test harness expects.
            SCREAMING_SNAKE_CASE: Union[str, Any] = self.prepare_config_and_inputs()
            (
                (SCREAMING_SNAKE_CASE),
                (SCREAMING_SNAKE_CASE),
                (SCREAMING_SNAKE_CASE),
                (SCREAMING_SNAKE_CASE),
                (SCREAMING_SNAKE_CASE),
                (SCREAMING_SNAKE_CASE),
                (SCREAMING_SNAKE_CASE),
            ): Union[str, Any] = config_and_inputs
            SCREAMING_SNAKE_CASE: List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict

    def __UpperCamelCase(self: List[str]) -> str:
        """simple docstring"""
        SCREAMING_SNAKE_CASE: Dict = TFMobileBertModelTest.TFMobileBertModelTester(self)
        SCREAMING_SNAKE_CASE: Optional[Any] = ConfigTester(self, config_class=a, hidden_size=37)

    def __UpperCamelCase(self: Any) -> Union[str, Any]:
        """simple docstring"""
        self.config_tester.run_common_tests()

    def __UpperCamelCase(self: Any) -> str:
        """simple docstring"""
        SCREAMING_SNAKE_CASE: str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*a)

    def __UpperCamelCase(self: int) -> Optional[int]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE: Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*a)

    def __UpperCamelCase(self: Optional[int]) -> Any:
        """simple docstring"""
        SCREAMING_SNAKE_CASE: Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*a)

    def __UpperCamelCase(self: Dict) -> List[str]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*a)

    def __UpperCamelCase(self: int) -> Tuple:
        """simple docstring"""
        SCREAMING_SNAKE_CASE: str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*a)

    def __UpperCamelCase(self: Any) -> Tuple:
        """simple docstring"""
        SCREAMING_SNAKE_CASE: Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*a)

    def __UpperCamelCase(self: Union[str, Any]) -> int:
        """simple docstring"""
        SCREAMING_SNAKE_CASE: int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*a)

    def __UpperCamelCase(self: Tuple) -> List[str]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE: Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*a)

    @slow
    def __UpperCamelCase(self: Dict) -> Optional[Any]:
        """simple docstring"""
        # Smoke-test loading the published checkpoint.
        for model_name in ["google/mobilebert-uncased"]:
            SCREAMING_SNAKE_CASE: Optional[int] = TFMobileBertModel.from_pretrained(a)
            self.assertIsNotNone(a)


@require_tf
class _UpperCamelCase(unittest.TestCase):
    '''simple docstring'''

    @slow
    def __UpperCamelCase(self: int) -> Union[str, Any]:
        """simple docstring"""
        # Integration check: pretraining logits on fixed ids must match the
        # recorded reference slice to 1e-4.
        SCREAMING_SNAKE_CASE: Union[str, Any] = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        SCREAMING_SNAKE_CASE: List[Any] = tf.constant([[0, 1, 2, 3, 4, 5]])
        SCREAMING_SNAKE_CASE: int = model(a)[0]
        SCREAMING_SNAKE_CASE: Optional[int] = [1, 6, 3_0522]
        self.assertEqual(output.shape, a)
        SCREAMING_SNAKE_CASE: Optional[Any] = tf.constant(
            [
                [
                    [-4.591_9547, -9.24_8295, -9.64_5256],
                    [-6.730_6175, -6.44_0284, -6.605_2837],
                    [-7.274_3506, -6.784_7915, -6.02_4673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], a, atol=1e-4)
76
from transformers import BertTokenizerFast

from .custom_tokenization import CustomTokenizer


class CustomTokenizerFast(BertTokenizerFast):
    """Fast tokenizer counterpart of :class:`CustomTokenizer`.

    The obfuscated original inherited from an undefined name ``__A``;
    ``BertTokenizerFast`` is imported and otherwise unused, so it is the
    intended base class. ``slow_tokenizer_class`` tells the fast tokenizer
    which slow implementation to convert from / fall back to.
    """

    slow_tokenizer_class = CustomTokenizer
    pass
76
1
def binary_insertion_sort(collection):
    """Sort *collection* in place using binary insertion sort and return it.

    For each element, a binary search locates the insertion point within the
    already-sorted prefix, then the tail is shifted one slot to the right.
    The ``else: low = mid + 1`` branch on equal keys keeps the sort stable.

    :param collection: mutable sequence of mutually comparable items
    :return: the same sequence, sorted ascending
    """
    length = len(collection)
    for i in range(1, length):
        val = collection[i]
        low = 0
        high = i - 1
        # Binary-search the insertion index for ``val`` in collection[:i].
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift elements in collection[low:i] one position right, then insert.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


# Backward-compatible alias for the previous (obfuscated) definition name.
lowerCamelCase__ = binary_insertion_sort

if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
76
import logging
import random

import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex


logger = logging.getLogger(__name__)


class RayRetriever:
    """Ray actor wrapper holding one :class:`RagRetriever` per worker process.

    The obfuscated original had duplicate parameter names (a SyntaxError) and
    never set ``self.initialized``; both are restored here.
    """

    def __init__(self):
        # Becomes True once create_rag_retriever has built the retriever.
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        """Lazily build this worker's retriever exactly once (idempotent)."""
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        """Load the retrieval index into this worker's memory."""
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        """Run the underlying retriever and return (doc_ids, doc_embeddings)."""
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    """RAG retriever that fans retrieval out to a pool of Ray actor workers.

    With an empty worker pool it degrades to a local, non-distributed
    retriever running in this process.
    """

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        # A pre-initialized local index cannot be shipped to remote workers —
        # they must load it themselves from paths.
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            # Build a retriever inside every remote actor; block until done.
            ray.get(
                [
                    worker.create_rag_retriever.remote(
                        config, question_encoder_tokenizer, generator_tokenizer, index
                    )
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        """Initialize the index on every worker, or locally if there are none."""
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        """Retrieve documents, dispatching to a random worker when distributed."""
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor to spread the load.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        """Delegate tokenizer loading to the parent class."""
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        """Build a distributed retriever from a pretrained config and Ray actors."""
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
76
1
import argparse
import json
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
    """Build train/eval dataloaders for GLUE MRPC tokenized with *model_name*.

    :param accelerator: the :class:`Accelerator` (used to detect TPU padding needs)
    :param batch_size: per-device training batch size
    :param model_name: tokenizer checkpoint to load
    :return: (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["idx", "sentence1", "sentence2"],
        load_from_cache_file=False,
    )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader


def evaluation_loop(accelerator, model, eval_dataloader, metric):
    """Run one full evaluation pass and return the MRPC accuracy."""
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather((predictions, batch["labels"]))
        # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(predictions=predictions, references=references)
    eval_metric = metric.compute()
    return eval_metric["accuracy"]


def training_function(config, args):
    """Train MRPC with optional DeepSpeed, checkpointing each epoch.

    When ``args.resume_from_checkpoint`` is set, restores state and verifies
    accuracy / learning rates against the JSON snapshot written at save time.
    """
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer — a dummy stand-in when DeepSpeed config owns the optimizer.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler — dummy when DeepSpeed config owns the scheduler.
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        # Checkpoint folders are named "epoch_<n>"; parse <n> back out.
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)


def main():
    """Parse CLI args and launch the training function."""
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
76
from typing import Any


class Node:
    """A single node of a singly linked list."""

    def __init__(self, data: Any):
        self.data = data   # payload carried by this node
        self.next = None   # reference to the following node (None at the tail)

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    """Singly linked list supporting indexing, insertion, deletion and reversal."""

    def __init__(self):
        self.head = None  # first node, or None when the list is empty

    def __iter__(self) -> Any:
        """Yield the data of every node, head to tail."""
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        """Number of nodes (O(n))."""
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        """Return the data at *index*; raise ValueError if out of range."""
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, value in enumerate(self):
            if i == index:
                return value
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        """Overwrite the data at *index*; raise ValueError if out of range."""
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        """Append *data* at the end of the list."""
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        """Prepend *data* at the front of the list."""
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        """Insert *data* so it becomes the element at *index*."""
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        """Remove and return the first element."""
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        """Remove and return the element at *index*; raise IndexError if invalid."""
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        """True when the list has no nodes."""
        return self.head is None

    def reverse(self) -> None:
        """Reverse the list in place."""
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev


def test_singly_linked_list() -> None:
    """Exercise the basic integer behaviour of LinkedList."""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))


def test_singly_linked_list_2() -> None:
    """Exercise LinkedList with heterogeneous element types."""
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main():
    """Interactive demo driven by user input."""
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
76
1
# Small undirected demo graph used by the __main__ examples below.
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph, start, goal):
    """Return one shortest path from *start* to *goal* as a list of nodes.

    :param graph: adjacency mapping node -> list of neighbour nodes
    :param start: source node
    :param goal: destination node
    :return: the shortest path including both endpoints, or [] if unreachable
    """
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph, start, target):
    """Return the number of edges on a shortest path from *start* to *target*.

    :return: distance in edges; 0 when start == target; -1 when the graph is
        empty, either endpoint is missing, or target is unreachable
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    # ``{start}`` (not ``set(start)``) so multi-character node labels are not
    # split into their individual characters.
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = dist[node] if dist[target] == -1 else min(dist[target], dist[node])
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
76
import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger


logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    """How much verification to run on downloaded data and splits."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Base error for checksum verification of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    """Compare recorded download checksums against the expected ones.

    :param expected_checksums: url -> expected checksum info, or None to skip
    :param recorded_checksums: url -> recorded checksum info
    :param verification_name: optional label used in log/error messages
    :raises ExpectedMoreDownloadedFiles: expected urls missing from recordings
    :raises UnexpectedDownloadedFile: recorded urls not in the expectations
    :raises NonMatchingChecksumError: any checksum mismatch
    """
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Base error for split-size verification."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file is missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    """Compare recorded split sizes against the expected ones.

    :raises ExpectedMoreSplits: expected split names missing from recordings
    :raises UnexpectedSplits: recorded split names not in the expectations
    :raises NonMatchingSplitsSizesError: any num_examples mismatch
    """
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and (optionally) the sha256 checksum of *path*.

    The file is hashed incrementally in 1 MiB chunks to bound memory use.
    """
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size):
    """True when *dataset_size* fits under the configured in-memory limit."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
76
1
import argparse
import json
from pathlib import Path

import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download

from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_audio_spectrogram_transformer_config(model_name):
    """Build an :class:`ASTConfig` matching the named original checkpoint."""
    config = ASTConfig()

    if "10-10" in model_name:
        pass  # defaults already use stride 10
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def rename_key(name):
    """Map an original checkpoint parameter name onto the HF naming scheme."""
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name


def convert_state_dict(orig_state_dict, config):
    """Rename keys and split fused qkv projections into query/key/value."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            prefix = f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention."
            # Fused qkv weights/biases are stacked [q; k; v] along dim 0.
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def remove_keys(state_dict):
    """Drop original classification-head keys that have no HF counterpart."""
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Convert an original AST checkpoint to HF format and verify its logits.

    :param model_name: one of the supported checkpoint names below
    :param pytorch_dump_folder_path: optional directory to save the converted model
    :param push_to_hub: whether to upload the converted model to the MIT org
    :raises ValueError: on unknown model name or logits mismatch
    """
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()
    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )
        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="ast-finetuned-audioset-10-10-0.4593",
        type=str,
        help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
76
import argparse
import json

import numpy
import torch

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    """Convert an original XLM checkpoint into a Transformers-style dump.

    Writes three files into ``pytorch_dump_folder_path``: the PyTorch weights,
    the JSON config, and the JSON vocabulary.

    Args:
        xlm_checkpoint_path: path to the official XLM ``.pth`` checkpoint.
        pytorch_dump_folder_path: output directory for the converted files.
    """
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository,
    # so prefix everything except the prediction head with "transformer."
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    # Tensors/arrays in the params dict are not JSON-serializable; drop them.
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    # BPE convention: word-final tokens get "</w>"; continuation markers "@@" are stripped.
    # The first 14 ids are special tokens and are kept verbatim.
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
76
1
class Graph:
    """Undirected weighted graph with a Borůvka minimum-spanning-tree solver."""

    def __init__(self):
        # Counters for vertices/edges added so far.
        self.num_vertices = 0
        self.num_edges = 0
        # adjacency[u][v] -> weight; edges are stored symmetrically.
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Register *vertex* if it is not already part of the graph."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Add the undirected edge head<->tail with *weight*; self-loops are ignored."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Re-weight edges so all weights are pairwise distinct.

        Borůvka's algorithm assumes distinct edge weights to break ties
        deterministically; ties are resolved by bumping the heavier duplicate.
        """
        # Keep exactly one orientation of every undirected edge.
        unique_edges = []
        seen = set()
        for tail, head, weight in self.get_edges():
            if (head, tail) not in seen:
                seen.add((tail, head))
                unique_edges.append([tail, head, weight])
        unique_edges.sort(key=lambda edge: edge[2])
        for i in range(len(unique_edges) - 1):
            if unique_edges[i][2] >= unique_edges[i + 1][2]:
                unique_edges[i + 1][2] = unique_edges[i][2] + 1
        # Write the adjusted weights back symmetrically.
        for tail, head, weight in unique_edges:
            self.adjacency[tail][head] = weight
            self.adjacency[head][tail] = weight

    def __str__(self):
        """Human-readable edge list, one ``head -> tail == weight`` per line."""
        lines = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                lines.append(f"{head} -> {tail} == {weight}")
        return "\n".join(lines)

    def get_edges(self):
        """Return all directed (tail, head, weight) triples (each undirected edge twice)."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        """Return a view of all vertices."""
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Construct a Graph from iterables of vertices and (head, tail, weight) edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set forest with path compression and union by rank."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            """Create a singleton set for *item* (idempotent)."""
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            """Return the representative of *item*'s set, compressing the path."""
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            """Merge the sets containing *item1* and *item2*; return the new root."""
            root1 = self.find(item1)
            root2 = self.find(item2)
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            # Equal ranks: pick root1 and increase its rank.
            self.rank[root1] += 1
            self.parent[root2] = root1
            return root1

    @staticmethod
    def boruvka_mst(graph):
        """Compute a minimum spanning tree of *graph* with Borůvka's algorithm.

        Assumes distinct edge weights (see :meth:`distinct_weight`).
        Returns the MST as a new :class:`Graph`.
        """
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            # Cheapest outgoing edge per component root; -1 means "none yet".
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            # One orientation per undirected edge.
            candidate_edges = []
            seen = set()
            for head, tail, weight in graph.get_edges():
                if (tail, head) not in seen:
                    seen.add((head, tail))
                    candidate_edges.append((head, tail, weight))

            for head, tail, weight in candidate_edges:
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]

            # Contract: add every component's cheapest edge that still joins two sets.
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components -= 1
        return Graph.build(edges=mst_edges)
76
def lowerCamelCase__ ( _a , _a): _validate_point(_a) _validate_point(_a) if len(_a) != len(_a): raise ValueError("Both points must be in the same n-dimensional space") return float(sum(abs(a - b) for a, b in zip(_a , _a))) def lowerCamelCase__ ( _a): if point: if isinstance(_a , _a): for item in point: if not isinstance(_a , (int, float)): SCREAMING_SNAKE_CASE : List[Any] = ( "Expected a list of numbers as input, found " f"{type(_a).__name__}" ) raise TypeError(_a) else: SCREAMING_SNAKE_CASE : List[Any] = f"Expected a list of numbers as input, found {type(_a).__name__}" raise TypeError(_a) else: raise ValueError("Missing an input") def lowerCamelCase__ ( _a , _a): _validate_point(_a) _validate_point(_a) if len(_a) != len(_a): raise ValueError("Both points must be in the same n-dimensional space") return float(sum(abs(x - y) for x, y in zip(_a , _a))) if __name__ == "__main__": import doctest doctest.testmod()
76
1
import unittest

from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


# SentencePiece marker for a word-initial piece.
SPIECE_UNDERLINE = '▁'

SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')


@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for BigBird (slow + fast implementations)."""

    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # Build a tokenizer from the fixture vocab and save it for the mixin's helpers.
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Token <-> id round trip for a known special token."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_rust_and_python_full_tokenizers(self):
        """Slow and fast tokenizers must agree on tokens and ids."""
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        # Unknown pieces ("9", "é") come back as <unk>.
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        # Full pretrained tokenizer used by the @slow integration tests below.
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_special_tokens(self):
        """Decoding must re-insert [CLS]/[SEP] and keep [MASK] attached to the text."""
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)

        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")

    @slow
    def test_tokenizer_integration(self):
        # Rows are padded to a common length of 96; trailing zeros are padding.
        expected_encoding = {
            "input_ids": [
                # fmt: off
                [65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66],  # noqa: E231
                [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66] + [0] * 63,  # noqa: E231
                [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66] + [0] * 84,  # noqa: E231
                # fmt: on
            ],
            "attention_mask": [
                [1] * 96,
                [1] * 33 + [0] * 63,
                [1] * 12 + [0] * 84,
            ],
        }

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-roberta-base",
            revision="215c99f1600e06f83acce68422f2035b2b5c3510",
        )
76
from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { 'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json', # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn } class _UpperCamelCase ( __A ): '''simple docstring''' lowerCamelCase__ ='vit_msn' def __init__( self : str , a : Tuple=768 , a : Tuple=12 , a : Any=12 , a : int=3072 , a : List[Any]="gelu" , a : Dict=0.0 , a : int=0.0 , a : str=0.02 , a : List[str]=1e-06 , a : List[Any]=224 , a : Union[str, Any]=16 , a : Union[str, Any]=3 , a : Tuple=True , **a : Dict , ) -> List[Any]: """simple docstring""" super().__init__(**a ) SCREAMING_SNAKE_CASE : Dict = hidden_size SCREAMING_SNAKE_CASE : Optional[Any] = num_hidden_layers SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size SCREAMING_SNAKE_CASE : int = hidden_act SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : List[Any] = initializer_range SCREAMING_SNAKE_CASE : int = layer_norm_eps SCREAMING_SNAKE_CASE : Dict = image_size SCREAMING_SNAKE_CASE : Tuple = patch_size SCREAMING_SNAKE_CASE : Optional[int] = num_channels SCREAMING_SNAKE_CASE : List[str] = qkv_bias
76
1
"""Fine-tune (or train from scratch) library models for language modeling on a text file.

Supports causal LM (GPT/GPT-2/CTRL), masked LM (BERT/RoBERTa, --mlm), and
permutation LM (XLNet) objectives.
"""

import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional

from torch.utils.data import ConcatDataset

import transformers
from transformers import (
    CONFIG_MAPPING,
    MODEL_WITH_LM_HEAD_MAPPING,
    AutoConfig,
    AutoModelWithLMHead,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    DataCollatorForPermutationLanguageModeling,
    DataCollatorForWholeWordMask,
    HfArgumentParser,
    LineByLineTextDataset,
    LineByLineWithRefDataset,
    PreTrainedTokenizer,
    TextDataset,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


logger = logging.getLogger(__name__)


MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether ot not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    """Build the train or eval dataset described by *args*.

    Picks line-by-line vs. contiguous-block datasets, optional Chinese
    whole-word-mask reference files, and glob-based multi-file training sets.
    """

    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it,and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
            "--mlm flag (masked language modeling)."
        )

    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets

    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info(" %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
76
import baseaa def lowerCamelCase__ ( _a): return baseaa.aaaencode(string.encode("utf-8")) def lowerCamelCase__ ( _a): return baseaa.aaadecode(_a).decode("utf-8") if __name__ == "__main__": import doctest doctest.testmod()
76
1
import math
from typing import Callable, List, Optional, Union

import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler


def make_transparency_mask(size, overlap_pixels, remove_borders=None):
    """Build an L-mode alpha mask (uint8) that ramps from 0 at the borders to 255
    in the center over ``overlap_pixels``, with the ramp removed on any side
    listed in ``remove_borders`` ("l", "r", "t", "b")."""
    # Bug fix: original used a mutable default argument (=[]).
    if remove_borders is None:
        remove_borders = []
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)
    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask


def clamp(n, smallest, largest):
    """Clamp ``n`` into the inclusive range [smallest, largest]."""
    return max(smallest, min(n, largest))


def clamp_rect(rect, lower, upper):
    """Clamp an (x0, y0, x1, y1) rectangle so x stays in [lower[0], upper[0]]
    and y in [lower[1], upper[1]]."""
    return (
        clamp(rect[0], lower[0], upper[0]),
        clamp(rect[1], lower[1], upper[1]),
        clamp(rect[2], lower[0], upper[0]),
        clamp(rect[3], lower[1], upper[1]),
    )


def add_overlap_rect(rect, overlap, image_size):
    """Grow a rectangle by ``overlap`` pixels on every side, clamped to the image."""
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    """Prepend an ``original_slice``-wide strip of (resized) source context to the
    left of ``tile`` so the upscaler sees continuous content across tile seams."""
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    """Drop the (already 4x-upscaled) context strip added by ``squeeze_tile``."""
    # Bug fix: original assigned the crop rect to an obfuscated name and then
    # cropped with an undefined variable.
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    """Return the largest multiple of ``d`` that is <= ``n``."""
    # Bug fix: original assigned the remainder to an obfuscated name and then
    # referenced the undefined name ``divisor``.
    divisor = n % d
    return n - divisor


class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    """Stable-diffusion x4 upscaler that processes a large image tile-by-tile,
    blending neighbouring tiles with linear-ramp alpha masks to hide seams."""

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        low_res_scheduler: DDPMScheduler,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        max_noise_level: int = 350,
    ):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )

    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        """Upscale the (x, y) tile of ``image`` and paste the 4x result into
        ``final_image`` using a transparency mask on the overlapping borders."""
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        # x-position (in tile coordinates) of the source-context strip.
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        # Keep a hard (unramped) edge on the image boundary sides.
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_inference_steps: int = 75,
        guidance_scale: float = 9.0,
        noise_level: int = 50,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        tile_size: int = 128,
        tile_border: int = 32,
        original_image_slice: int = 32,
    ):
        """Run the upscaler over every ``tile_size`` tile of ``image`` and return
        the stitched 4x image. ``callback`` (if given) receives a dict with the
        running ``progress`` fraction and the partially stitched ``image``."""
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image


def main():
    """Demo: tile-upscale the diffusers library logo."""
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        # Bug fix: the callback's parameter was obfuscated while its body still
        # referenced the original name ``obj``.
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")


if __name__ == "__main__":
    main()
76
from datetime import datetime as dt
import os

from github import Github


# Issues carrying any of these labels are never auto-closed or marked stale.
# Bug fix: this list was assigned to an obfuscated name while the loop below
# still referenced ``LABELS_TO_EXEMPT``.
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main():
    """Close issues the stale bot already warned about and post a stale warning
    on issues inactive for more than 23 days (and older than 30 days)."""
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Bug fix: the sort key lambda referenced the undefined name ``i`` and
        # ``reverse`` was an undefined variable; newest comment must come first.
        comments = sorted(issue.get_comments(), key=lambda c: c.created_at, reverse=True)
        last_comment = comments[0] if comments else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    # Bug fix: the script called ``main()`` although the function definition had
    # been renamed by the obfuscation.
    main()
76
1
import argparse
import logging
import os
import sys

import numpy as np
import onnxruntime
import torch

from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers

import transformers
from transformers import BartForConditionalGeneration, BartTokenizer

logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=os.environ.get("LOGLEVEL", "INFO").upper(),
    stream=sys.stdout,
)

logger = logging.getLogger(__name__)

# Bug fix: these lookup tables were assigned to obfuscated names while
# ``load_model_tokenizer`` still referenced ``model_dict``/``tokenizer_dict``.
model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}


def parse_args():
    """Parse the command-line options for the BART + beam-search ONNX export."""
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")
    args = parser.parse_args()
    return args


def load_model_tokenizer(model_name, device="cpu"):
    """Load the model and tokenizer for ``model_name`` and move the model to ``device``.

    NOTE(review): the three config overrides below were obfuscated in the input;
    restored to disable constraints that the scripted beam search does not
    support — confirm against the upstream exporter.
    """
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)
    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0
    return huggingface_model, tokenizer


def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    """TorchScript the beam-search generator, export it to ONNX, deduplicate
    initializers, then check ONNX Runtime output matches torch.generate."""
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")


def main():
    """Entry point: parse args, load the model, export to ONNX, validate."""
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length

    if args.num_beams:
        num_beams = args.num_beams

    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)


if __name__ == "__main__":
    main()
76
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import logging

if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


def make_batched(videos):
    """Normalize ``videos`` into a list of videos, each a list of frames.

    Accepts a single image, a list of frames, or a list of videos.
    """
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")


class VivitImageProcessor(BaseImageProcessor):
    """Video image processor: resize, center-crop, rescale (with optional offset
    to the [-0.5, 0.5] range), and normalize every frame of every video."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize to ``size`` — either by shortest edge or to an exact height/width."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        # ``resize`` here is the module-level transform, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to an exact (height, width)."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Rescale pixel values by ``scale``; if ``offset``, first shift by
        -scale/2 so the output is centered around zero."""
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Channel-wise normalize with ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        """Validate the options and run the transform chain on one frame."""
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess one or more videos; per-call arguments override the
        defaults configured on the instance."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
76
1
from typing import Any, Dict, List, Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_vision_available():
    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import (
        MODEL_FOR_OBJECT_DETECTION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
    )

logger = logging.get_logger(__name__)

# Bug fix: these aliases were assigned to obfuscated names while the
# annotations below still referenced ``Prediction``/``Predictions``.
Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    """Object-detection pipeline: returns a list of {"score", "label", "box"}
    dicts per image. Also supports LayoutLM-style token-classification models
    when a tokenizer is present."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        """Route the optional ``threshold`` kwarg to the postprocess step."""
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        """Load the image, record its (height, width), and run the processor."""
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        """Run the model, carrying ``target_size`` (and ``bbox`` for LayoutLM)
        through to postprocessing."""
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        """Convert raw model outputs into a thresholded list of predictions."""
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                # LayoutLM boxes are normalized to a 0-1000 grid.
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """Turn an (xmin, ymin, xmax, ymax) tensor into an int dict."""
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
76
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer

logger = logging.get_logger(__name__)

# Bug fix: all four constants below were assigned to the same obfuscated name
# ``a_`` (each clobbering the last); restored to the canonical names the class
# attributes reference.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}


class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) ConvBERT tokenizer, BERT-style with
    [CLS]/[SEP] special tokens."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    # Bug fix: the obfuscated signature declared every parameter as ``a``
    # (duplicate argument names — a SyntaxError); restored the real names.
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        # Rebuild the backend normalizer if the saved state disagrees with the
        # requested options.
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] (+ B [SEP] for a pair)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """0s over [CLS] A [SEP], 1s over B [SEP] when a second sequence is given."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend vocabulary files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
76
1
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Return the maximum sum of any contiguous subarray of ``arr`` (Kadane's
    algorithm, O(n) time / O(1) space).

    :param arr: sequence of numbers (an empty sequence returns 0).
    :param allow_empty_subarrays: when True the empty subarray (sum 0) is a
        valid answer, so an all-negative input yields 0 instead of the largest
        element.

    Bug fix: the obfuscated signature declared both parameters as ``_a``
    (duplicate argument names — a SyntaxError) and the function name no longer
    matched the ``max_subarray_sum`` call site below; both restored.
    """
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # Either extend the running subarray or restart at the current element
        # (or at the empty subarray when that is allowed).
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
76
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


# Bug fix: pytest discovers these hooks BY NAME; the obfuscation had renamed
# both to the same ``lowerCamelCase__`` identifier, so neither hook ever ran
# (and the second definition shadowed the first). Restored the canonical
# pytest hook names and parameter names the bodies rely on.
def pytest_addoption(parser):
    """Register the shared diffusers test-suite command-line options."""
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """Emit the extended test reports when --make-reports is passed."""
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
76
1
from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a_ = {'configuration_mmbt': ['MMBTConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings'] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
76
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self : Tuple , a : int , a : Optional[int]=13 , a : Optional[int]=3 , a : int=224 , a : Optional[int]=30 , a : int=400 , a : Union[str, Any]=True , a : int=None , a : Tuple=True , a : Tuple=[0.5, 0.5, 0.5] , a : Optional[int]=[0.5, 0.5, 0.5] , ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : str = size if size is not None else {"height": 18, "width": 18} SCREAMING_SNAKE_CASE : Union[str, Any] = parent SCREAMING_SNAKE_CASE : int = batch_size SCREAMING_SNAKE_CASE : int = num_channels SCREAMING_SNAKE_CASE : Any = image_size SCREAMING_SNAKE_CASE : Tuple = min_resolution SCREAMING_SNAKE_CASE : str = max_resolution SCREAMING_SNAKE_CASE : int = do_resize SCREAMING_SNAKE_CASE : List[Any] = size SCREAMING_SNAKE_CASE : int = do_normalize SCREAMING_SNAKE_CASE : Tuple = image_mean SCREAMING_SNAKE_CASE : Tuple = image_std def __UpperCamelCase ( self : Any ) -> Optional[int]: """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class _UpperCamelCase ( __A , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =ViTImageProcessor if is_vision_available() else None def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = EfficientFormerImageProcessorTester(self ) @property def __UpperCamelCase ( self : Any ) -> List[str]: """simple docstring""" return 
self.image_proc_tester.prepare_image_processor_dict() def __UpperCamelCase ( self : List[Any] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a , "image_mean" ) ) self.assertTrue(hasattr(a , "image_std" ) ) self.assertTrue(hasattr(a , "do_normalize" ) ) self.assertTrue(hasattr(a , "do_resize" ) ) self.assertTrue(hasattr(a , "size" ) ) def __UpperCamelCase ( self : int ) -> str: """simple docstring""" pass def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=a ) for image in image_inputs: self.assertIsInstance(a , Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE : List[str] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched SCREAMING_SNAKE_CASE : str = image_processor(a , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) def __UpperCamelCase ( self : List[str] ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_proc_tester , equal_resolution=a , numpify=a ) for image in image_inputs: self.assertIsInstance(a , np.ndarray ) # Test not batched input SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( 
encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched SCREAMING_SNAKE_CASE : Any = image_processor(a , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) def __UpperCamelCase ( self : List[str] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=a , torchify=a ) for image in image_inputs: self.assertIsInstance(a , torch.Tensor ) # Test not batched input SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(a , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , )
76
1
import warnings from ...utils import logging from .image_processing_flava import FlavaImageProcessor a_ = logging.get_logger(__name__) class _UpperCamelCase ( __A ): '''simple docstring''' def __init__( self : Dict , *a : Dict , **a : int ) -> None: """simple docstring""" warnings.warn( "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use FlavaImageProcessor instead." , a , ) super().__init__(*a , **a )
76
import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def lowerCamelCase__ ( _a): SCREAMING_SNAKE_CASE : int = {} SCREAMING_SNAKE_CASE : Any = tokenizer(example["content"] , truncation=_a)["input_ids"] SCREAMING_SNAKE_CASE : Dict = len(example["content"]) / len(output["input_ids"]) return output a_ = HfArgumentParser(PretokenizationArguments) a_ = parser.parse_args() if args.num_workers is None: a_ = multiprocessing.cpu_count() a_ = AutoTokenizer.from_pretrained(args.tokenizer_dir) a_ = time.time() a_ = load_dataset(args.dataset_name, split='train') print(F'''Dataset loaded in {time.time()-t_start:.2f}s''') a_ = time.time() a_ = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ 'repo_name', 'path', 'copies', 'size', 'content', 'license', 'hash', 'line_mean', 'line_max', 'alpha_frac', 'autogenerated', ], ) print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''') a_ = time.time() ds.push_to_hub(args.tokenized_data_repo) print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
76
1
import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin a_ = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right a_ = 25_0004 a_ = 25_0020 @require_sentencepiece @require_tokenizers class _UpperCamelCase ( __A , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =MBartaaTokenizer lowerCamelCase__ =MBartaaTokenizerFast lowerCamelCase__ =True lowerCamelCase__ =True def __UpperCamelCase ( self : Any ) -> int: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE : Tuple = MBartaaTokenizer(a , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=a ) tokenizer.save_pretrained(self.tmpdirname ) def __UpperCamelCase ( self : Optional[Any] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : str = "<s>" SCREAMING_SNAKE_CASE : Any = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a ) def __UpperCamelCase ( self : List[str] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "<mask>" ) self.assertEqual(len(a ) , 1054 ) def __UpperCamelCase ( self : Any ) -> Tuple: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1054 ) def __UpperCamelCase ( self : List[str] ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Dict = MBartaaTokenizer(a , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=a ) 
SCREAMING_SNAKE_CASE : Tuple = tokenizer.tokenize("This is a test" ) self.assertListEqual(a , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( a , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , ) SCREAMING_SNAKE_CASE : Tuple = tokenizer.convert_tokens_to_ids(a ) self.assertListEqual( a , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) SCREAMING_SNAKE_CASE : Dict = tokenizer.convert_ids_to_tokens(a ) self.assertListEqual( a , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , ) @slow def __UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = {"input_ids": [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 
4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=a , model_name="facebook/mbart-large-50" , 
revision="d3913889c59cd5c9e456b269c376325eabad57e2" , ) def __UpperCamelCase ( self : int ) -> Optional[int]: """simple docstring""" if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return SCREAMING_SNAKE_CASE : Optional[int] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): SCREAMING_SNAKE_CASE : Dict = self.rust_tokenizer_class.from_pretrained(a , **a ) SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained(a , **a ) SCREAMING_SNAKE_CASE : Union[str, Any] = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : str = tokenizer_r.save_pretrained(a ) SCREAMING_SNAKE_CASE : Tuple = tokenizer_p.save_pretrained(a ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) SCREAMING_SNAKE_CASE : Optional[int] = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f ) self.assertSequenceEqual(a , a ) # Checks everything loads correctly in the same way SCREAMING_SNAKE_CASE : int = tokenizer_r.from_pretrained(a ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.from_pretrained(a ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(a , a ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(a ) # Save tokenizer rust, legacy_format=True SCREAMING_SNAKE_CASE : int = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.save_pretrained(a , legacy_format=a ) SCREAMING_SNAKE_CASE : Any = tokenizer_p.save_pretrained(a ) # Checks it save with the same files self.assertSequenceEqual(a , a ) # Checks 
everything loads correctly in the same way SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.from_pretrained(a ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_p.from_pretrained(a ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(a , a ) ) shutil.rmtree(a ) # Save tokenizer rust, legacy_format=False SCREAMING_SNAKE_CASE : List[str] = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r.save_pretrained(a , legacy_format=a ) SCREAMING_SNAKE_CASE : str = tokenizer_p.save_pretrained(a ) # Checks it saved the tokenizer.json file self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.from_pretrained(a ) SCREAMING_SNAKE_CASE : Any = tokenizer_p.from_pretrained(a ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(a , a ) ) shutil.rmtree(a ) @require_torch @require_sentencepiece @require_tokenizers class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' lowerCamelCase__ ='facebook/mbart-large-50-one-to-many-mmt' lowerCamelCase__ =[ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.', ] lowerCamelCase__ =[ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei' ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] lowerCamelCase__ =[EN_CODE, 8274, 127873, 25916, 7, 
8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2] @classmethod def __UpperCamelCase ( cls : Any ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : MBartaaTokenizer = MBartaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO" ) SCREAMING_SNAKE_CASE : Optional[Any] = 1 return cls def __UpperCamelCase ( self : str ) -> Optional[int]: """simple docstring""" self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 25_0001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 25_0004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 25_0020 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"] , 25_0038 ) def __UpperCamelCase ( self : Dict ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , a ) def __UpperCamelCase ( self : Any ) -> Optional[int]: """simple docstring""" self.assertIn(a , self.tokenizer.all_special_ids ) SCREAMING_SNAKE_CASE : Dict = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2] SCREAMING_SNAKE_CASE : str = self.tokenizer.decode(a , skip_special_tokens=a ) SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=a ) self.assertEqual(a , a ) self.assertNotIn(self.tokenizer.eos_token , a ) def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = ["this is gunna be a long sentence " * 20] assert isinstance(src_text[0] , a ) SCREAMING_SNAKE_CASE : Tuple = 10 SCREAMING_SNAKE_CASE : int = self.tokenizer(a , max_length=a , truncation=a ).input_ids[0] self.assertEqual(ids[0] , a ) self.assertEqual(ids[-1] , 2 ) self.assertEqual(len(a ) , a ) def __UpperCamelCase ( self : Optional[int] ) -> Any: """simple docstring""" self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [25_0053, 25_0001] ) def 
__UpperCamelCase ( self : Tuple ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : Tuple = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(a ) SCREAMING_SNAKE_CASE : Optional[Any] = MBartaaTokenizer.from_pretrained(a ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , a ) @require_torch def __UpperCamelCase ( self : Dict ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=a , return_tensors="pt" ) SCREAMING_SNAKE_CASE : int = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE] @require_torch def __UpperCamelCase ( self : List[Any] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=a , truncation=a , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , ) SCREAMING_SNAKE_CASE : int = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id ) self.assertIsInstance(a , a ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) SCREAMING_SNAKE_CASE : Dict = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , a ) self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def __UpperCamelCase ( self : int ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE : str = self.tokenizer(self.src_text , padding=a , 
truncation=a , max_length=3 , return_tensors="pt" ) SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer( text_target=self.tgt_text , padding=a , truncation=a , max_length=10 , return_tensors="pt" ) SCREAMING_SNAKE_CASE : str = targets["input_ids"] SCREAMING_SNAKE_CASE : Optional[int] = shift_tokens_right(a , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def __UpperCamelCase ( self : int ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE : Dict = self.tokenizer._build_translation_inputs( "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR" ) self.assertEqual( nested_simplify(a ) , { # en_XX, A, test, EOS "input_ids": [[25_0004, 62, 3034, 2]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 25_0001, } , )
76
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_detr_config(model_name):
    """Build a ``DetrConfig`` for ``model_name`` and return ``(config, is_panoptic)``."""
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    # use_timm_backbone=False so the HF ResNet backbone_config above is actually used
    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic


def create_rename_keys(config):
    """Return a list of (original_name, hf_name) pairs for all straight renames."""
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []

    # stem
    # fmt: off
    rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight"))
    rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight"))
    rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias"))
    rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean"))
    rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var"))
    # stages
    for stage_idx in range(len(config.backbone_config.depths)):
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            # shortcut (only the first layer of each stage downsamples)
            if layer_idx == 0:
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var",
                    )
                )
            # 3 convs
            for i in range(3):
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var",
                    )
                )
    # fmt: on

    for i in range(config.encoder_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (
                f"transformer.encoder.layers.{i}.self_attn.out_proj.weight",
                f"encoder.layers.{i}.self_attn.out_proj.weight",
            )
        )
        rename_keys.append(
            (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
        )
        rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
        rename_keys.append(
            (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
        )
        rename_keys.append(
            (f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias")
        )
        rename_keys.append(
            (f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight")
        )
        rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
        # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
        rename_keys.append(
            (
                f"transformer.decoder.layers.{i}.self_attn.out_proj.weight",
                f"decoder.layers.{i}.self_attn.out_proj.weight",
            )
        )
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
        )
        rename_keys.append(
            (
                f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
                f"decoder.layers.{i}.encoder_attn.out_proj.weight",
            )
        )
        rename_keys.append(
            (
                f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
                f"decoder.layers.{i}.encoder_attn.out_proj.bias",
            )
        )
        rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
        )
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias")
        )
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
        )
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
        )
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight")
        )
        rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))

    # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
    rename_keys.extend(
        [
            ("input_proj.weight", "input_projection.weight"),
            ("input_proj.bias", "input_projection.bias"),
            ("query_embed.weight", "query_position_embeddings.weight"),
            ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
            ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
            ("class_embed.weight", "class_labels_classifier.weight"),
            ("class_embed.bias", "class_labels_classifier.bias"),
            ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
            ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
            ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
            ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
            ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
            ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
        ]
    )

    return rename_keys


def rename_key(state_dict, old, new):
    """Move ``state_dict[old]`` to ``state_dict[new]`` in place."""
    val = state_dict.pop(old)
    state_dict[new] = val


def read_in_q_k_v(state_dict, is_panoptic=False):
    """Split fused in_proj weights/biases into separate q/k/v projection entries.

    NOTE(review): the destination key names below were destroyed by the dump and
    were reconstructed from the upstream conversion script — confirm against it.
    """
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]


def prepare_img():
    """Download the standard COCO cats image used for verifying conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert an original facebookresearch/detr checkpoint into the HF format.

    Downloads the original model from torch hub, remaps its state dict, verifies
    the outputs on a test image, and optionally saves / pushes the result.
    """
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    # NOTE(review): the destination keys in this remapping loop were destroyed by
    # the dump and were reconstructed from the upstream script — confirm.
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val

    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    processor_format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=processor_format)

    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        default="detr-resnet-50",
        type=str,
        choices=["detr-resnet-50", "detr-resnet-101"],
        help="Name of the DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
    args = parser.parse_args()
    convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
76
1
import logging
import os
from typing import List, TextIO, Union

from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask


logger = logging.getLogger(__name__)


class NER(TokenClassificationTask):
    """CoNLL-style NER task: one token per line, label in column ``label_idx``.

    NOTE(review): the original class names were destroyed by obfuscation; the
    names NER/Chunk/POS are reconstructed from the tasks' behavior — confirm
    against the callers of this module.
    """

    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        """Parse ``{mode}.txt`` in ``data_dir`` into a list of InputExample."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                # blank lines and -DOCSTART- markers delimit sentences
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        """Write per-token predictions next to the original test tokens."""
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                # prediction list exhausted (sequence was truncated during tokenization)
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        """Read label list from ``path``, or fall back to the CoNLL-2003 label set."""
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]


class Chunk(NER):
    """CoNLL-2000-style chunking task: chunk label lives in the second-to-last column."""

    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        """Read label list from ``path``, or fall back to the CoNLL-2000 chunk label set."""
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]


class POS(TokenClassificationTask):
    """Part-of-speech tagging task over CoNLL-U files (uses ``conllu.parse_incr``)."""

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        """Parse ``{mode}.txt`` (CoNLL-U format) into a list of InputExample."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        """Write each sentence as ``form (upos|prediction)`` tuples, one sentence per line."""
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f"{token['form']} ({token['upos']}|{s_p.pop(0)}) "
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        """Read label list from ``path``, or fall back to the universal POS tag set."""
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
76
import os


def solution(filename="p022_names.txt"):
    """Project Euler 22: sum of alphabetical-value * (1-based rank) over sorted names.

    Args:
        filename: name of the comma-separated, double-quoted names file located
            next to this module (default matches the original behavior).

    Returns:
        The total of ``rank * sum(ord(letter) - 64)`` over all names.
    """
    # os.path.join (rather than "+ '/'") also works when __file__ has no dirname
    with open(os.path.join(os.path.dirname(__file__), filename)) as file:
        names = str(file.readlines()[0])
    names = names.replace('"', "").split(",")
    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            # 'A' is 65, so ord(letter) - 64 maps A->1 ... Z->26
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score


if __name__ == "__main__":
    print(solution())
76
1
import argparse
import json
import os
from pathlib import Path

import requests
import torch

from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
    "jukebox-1b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "1b_lyrics/prior_level_2.pth.tar",
    ],
    "jukebox-5b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "5b_lyrics/prior_level_2.pth.tar",
    ],
}


def replace_key(key):
    """Apply the simple string-level key renames from OpenAI to HF naming."""
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key


def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    """Remap an original Jukebox state dict onto HF ``JukeboxModel`` key names.

    Args:
        state_dict: original (pre-rename) weights.
        model_state_dict: the target HF model's state dict, used for validation.
        key_prefix: "vqvae" or "priors.N" — where these weights will be loaded.
        mapping: dict updated in place with new_key -> original_key for auditing.

    Returns:
        The renamed state dict.
    """
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict


@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """Download the original Jukebox checkpoints, remap and load them into HF format."""
    # download the original checkpoints if they are not cached locally
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)
    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                # NOTE(review): the rename target on this branch was destroyed by
                # the dump; ".blocks." -> ".model." matches the upstream script — confirm.
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        # the first checkpoint is the VQ-VAE, the rest are the priors (top first)
        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="jukebox-5b-lyrics",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="jukebox-5b-lyrics-converted",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    args = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
76
from collections.abc import Callable

import numpy as np


def explicit_euler(
    ode_func: Callable, ya: float, xa: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve an ODE dy/dx = ode_func(x, y) with the explicit (forward) Euler method.

    Args:
        ode_func: right-hand side f(x, y) of the ODE.
        ya: initial value y(xa).
        xa: initial x.
        step_size: integration step h.
        x_end: final x up to which to integrate.

    Returns:
        Array of y values at xa, xa + h, ..., up to x_end.
        (NOTE(review): the obfuscated dump lost the parameter order; this order
        matches the standard explicit-Euler reference implementation — confirm.)

    >>> float(explicit_euler(lambda x, y: 1.0, 0.0, 0.0, 0.25, 1.0)[-1])
    1.0
    """
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa

    for k in range(n):
        # forward Euler step: y_{k+1} = y_k + h * f(x_k, y_k)
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y


# backward-compatible alias for the obfuscated original name
lowerCamelCase__ = explicit_euler


if __name__ == "__main__":
    import doctest

    doctest.testmod()
76
1
def molarity_to_normality(nfactor: float, moles: float, volume: float) -> int:
    """Normality = molarity (moles / volume) times the n-factor, rounded."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> int:
    """Ideal-gas pressure P = nRT / V (R = 0.0821 L·atm/mol·K), rounded."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> int:
    """Ideal-gas volume V = nRT / P (R = 0.0821 L·atm/mol·K), rounded."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> int:
    """Ideal-gas temperature T = PV / nR (R = 0.0821 L·atm/mol·K), rounded."""
    return round(float((pressure * volume) / (0.0821 * moles)))


# Backward-compatible alias: in the obfuscated original all four functions were
# named lowerCamelCase__, so only the last definition was visible to callers.
lowerCamelCase__ = pressure_and_volume_to_temperature


if __name__ == "__main__":
    import doctest

    doctest.testmod()
76
"""OR gate (boolean algebra).

Restores the real function name — the original ``__main__`` block already
calls ``or_gate`` — and distinct parameter names (the obfuscated source used
``_a`` twice, which is a SyntaxError).
"""


def or_gate(input_1: int, input_2: int) -> int:
    """Return 1 if at least one of the two inputs is 1, else 0."""
    # count(1) != 0 means "some input equals 1".
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Exhaustively check the 2-input truth table."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
76
1
"""Accuracy metric for the MATH dataset (Hendrycks et al., 2021).

Fixes from the obfuscated source: the three module constants were assigned to
``a_`` although the class references ``_CITATION`` / ``_DESCRIPTION`` /
``_KWARGS_DESCRIPTION``; both methods were named ``__UpperCamelCase`` although
``datasets.Metric`` dispatches to ``_info`` and ``_compute``; and ``_compute``
declared two parameters both named ``a`` (a SyntaxError).
"""
import math_equivalence  # From: git+https://github.com/hendrycks/math.git
import datasets


_CITATION = '\\n@article{hendrycksmath2021,\n    title={Measuring Mathematical Problem Solving With the MATH Dataset},\n    author={Dan Hendrycks\n    and Collin Burns\n    and Saurav Kadavath\n    and Akul Arora\n    and Steven Basart\n    and Eric Tang\n    and Dawn Song\n    and Jacob Steinhardt},\n    journal={arXiv preprint arXiv:2103.03874},\n    year={2021}\n}\n'

_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'

_KWARGS_DESCRIPTION = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n    predictions: list of predictions to score. Each prediction\n        is a string that contains natural language and LaTex.\n    references: list of reference for each prediction. Each\n        reference is a string that contains natural language\n        and LaTex.\nReturns:\n    accuracy: accuracy after canonicalizing inputs\n        (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n    >>> metric = datasets.load_metric("competition_math")\n    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n    >>> print(results)\n    {\'accuracy\': 1.0}\n'


@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _UpperCamelCase(datasets.Metric):
    """Accuracy on MATH: canonicalize LaTeX, then compare for equivalence."""

    def _info(self):
        """Describe the metric's inputs, citation and homepage."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        """Return the fraction of predictions judged equivalent to their reference."""
        n_correct = 0.0
        for i, j in zip(predictions, references):
            # is_equiv canonicalizes both strings before comparing.
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
76
"""Root-mean-square speed of gas molecules: v_rms = sqrt(3RT / M).

Restores names the obfuscation destroyed: the constant was assigned to ``a_``
although the body reads ``UNIVERSAL_GAS_CONSTANT``, and the ``__main__`` block
already calls ``rms_speed_of_molecule`` with ``temperature``/``molar_mass``.
"""

UNIVERSAL_GAS_CONSTANT = 8.314_4598  # J / (mol * K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Return sqrt(3 * R * temperature / molar_mass).

    Raises:
        Exception: if temperature < 0 K or molar_mass <= 0.
    """
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
    # example
    # NOTE(review): the original example passes 28 (g/mol, not kg/mol as the
    # error message demands), so the printed value is not physically 516 m/s.
    # Preserved as-is.
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
76
1
"""Unit tests for transformers' framework-agnostic tensor helpers
(flatten_dict / transpose / reshape / squeeze / expand_dims) across
NumPy, PyTorch, TensorFlow and JAX.

NOTE(review): identifiers in this file are machine-mangled — every local is
assigned to ``SCREAMING_SNAKE_CASE`` and later read through names such as
``x``/``a`` that no assignment defines, and method parameters were collapsed.
Code is preserved byte-for-byte; only comments/docstrings were added.
"""
import unittest

import numpy as np

from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
    expand_dims,
    flatten_dict,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    reshape,
    squeeze,
    transpose,
)


if is_flax_available():
    import jax.numpy as jnp

if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


class _UpperCamelCase ( unittest.TestCase ):
    """Mangled test case; ``a`` and ``x`` below are unresolved names — TODO restore."""

    def __UpperCamelCase ( self : Tuple ) -> Tuple:
        """flatten_dict should dot-join nested dictionary keys."""
        SCREAMING_SNAKE_CASE : Optional[Any] = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        SCREAMING_SNAKE_CASE : int = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }
        self.assertEqual(flatten_dict(a ) , a )

    def __UpperCamelCase ( self : int ) -> Dict:
        """transpose() should agree with numpy.ndarray.transpose."""
        SCREAMING_SNAKE_CASE : List[Any] = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(transpose(a ) , x.transpose() ) )
        SCREAMING_SNAKE_CASE : str = np.random.randn(3 , 4 , 5 )
        self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )

    @require_torch
    def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
        """transpose() on torch tensors should match the numpy path."""
        SCREAMING_SNAKE_CASE : str = np.random.randn(3 , 4 )
        SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(a )
        self.assertTrue(np.allclose(transpose(a ) , transpose(a ).numpy() ) )
        SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randn(3 , 4 , 5 )
        SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(a )
        self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , transpose(a , axes=(1, 2, 0) ).numpy() ) )

    @require_tf
    def __UpperCamelCase ( self : Tuple ) -> int:
        """transpose() on TF tensors should match the numpy path."""
        SCREAMING_SNAKE_CASE : Optional[Any] = np.random.randn(3 , 4 )
        SCREAMING_SNAKE_CASE : Tuple = tf.constant(a )
        self.assertTrue(np.allclose(transpose(a ) , transpose(a ).numpy() ) )
        SCREAMING_SNAKE_CASE : int = np.random.randn(3 , 4 , 5 )
        SCREAMING_SNAKE_CASE : Dict = tf.constant(a )
        self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , transpose(a , axes=(1, 2, 0) ).numpy() ) )

    @require_flax
    def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
        """transpose() on JAX arrays should match the numpy path."""
        SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randn(3 , 4 )
        SCREAMING_SNAKE_CASE : List[Any] = jnp.array(a )
        self.assertTrue(np.allclose(transpose(a ) , np.asarray(transpose(a ) ) ) )
        SCREAMING_SNAKE_CASE : List[Any] = np.random.randn(3 , 4 , 5 )
        SCREAMING_SNAKE_CASE : Any = jnp.array(a )
        self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , np.asarray(transpose(a , axes=(1, 2, 0) ) ) ) )

    def __UpperCamelCase ( self : List[Any] ) -> Tuple:
        """reshape() should agree with numpy.reshape."""
        SCREAMING_SNAKE_CASE : List[Any] = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(reshape(a , (4, 3) ) , np.reshape(a , (4, 3) ) ) )
        SCREAMING_SNAKE_CASE : Tuple = np.random.randn(3 , 4 , 5 )
        self.assertTrue(np.allclose(reshape(a , (12, 5) ) , np.reshape(a , (12, 5) ) ) )

    @require_torch
    def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
        """reshape() on torch tensors should match the numpy path."""
        SCREAMING_SNAKE_CASE : List[str] = np.random.randn(3 , 4 )
        SCREAMING_SNAKE_CASE : List[str] = torch.tensor(a )
        self.assertTrue(np.allclose(reshape(a , (4, 3) ) , reshape(a , (4, 3) ).numpy() ) )
        SCREAMING_SNAKE_CASE : Dict = np.random.randn(3 , 4 , 5 )
        SCREAMING_SNAKE_CASE : Any = torch.tensor(a )
        self.assertTrue(np.allclose(reshape(a , (12, 5) ) , reshape(a , (12, 5) ).numpy() ) )

    @require_tf
    def __UpperCamelCase ( self : Any ) -> Any:
        """reshape() on TF tensors should match the numpy path."""
        SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randn(3 , 4 )
        SCREAMING_SNAKE_CASE : Optional[int] = tf.constant(a )
        self.assertTrue(np.allclose(reshape(a , (4, 3) ) , reshape(a , (4, 3) ).numpy() ) )
        SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randn(3 , 4 , 5 )
        SCREAMING_SNAKE_CASE : int = tf.constant(a )
        self.assertTrue(np.allclose(reshape(a , (12, 5) ) , reshape(a , (12, 5) ).numpy() ) )

    @require_flax
    def __UpperCamelCase ( self : Optional[int] ) -> List[Any]:
        """reshape() on JAX arrays should match the numpy path."""
        SCREAMING_SNAKE_CASE : List[Any] = np.random.randn(3 , 4 )
        SCREAMING_SNAKE_CASE : Optional[Any] = jnp.array(a )
        self.assertTrue(np.allclose(reshape(a , (4, 3) ) , np.asarray(reshape(a , (4, 3) ) ) ) )
        SCREAMING_SNAKE_CASE : str = np.random.randn(3 , 4 , 5 )
        SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.array(a )
        self.assertTrue(np.allclose(reshape(a , (12, 5) ) , np.asarray(reshape(a , (12, 5) ) ) ) )

    def __UpperCamelCase ( self : int ) -> List[Any]:
        """squeeze() should agree with numpy.squeeze."""
        SCREAMING_SNAKE_CASE : Dict = np.random.randn(1 , 3 , 4 )
        self.assertTrue(np.allclose(squeeze(a ) , np.squeeze(a ) ) )
        SCREAMING_SNAKE_CASE : str = np.random.randn(1 , 4 , 1 , 5 )
        self.assertTrue(np.allclose(squeeze(a , axis=2 ) , np.squeeze(a , axis=2 ) ) )

    @require_torch
    def __UpperCamelCase ( self : List[str] ) -> Tuple:
        """squeeze() on torch tensors should match the numpy path."""
        SCREAMING_SNAKE_CASE : str = np.random.randn(1 , 3 , 4 )
        SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(a )
        self.assertTrue(np.allclose(squeeze(a ) , squeeze(a ).numpy() ) )
        SCREAMING_SNAKE_CASE : int = np.random.randn(1 , 4 , 1 , 5 )
        SCREAMING_SNAKE_CASE : Dict = torch.tensor(a )
        self.assertTrue(np.allclose(squeeze(a , axis=2 ) , squeeze(a , axis=2 ).numpy() ) )

    @require_tf
    def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
        """squeeze() on TF tensors should match the numpy path."""
        SCREAMING_SNAKE_CASE : str = np.random.randn(1 , 3 , 4 )
        SCREAMING_SNAKE_CASE : str = tf.constant(a )
        self.assertTrue(np.allclose(squeeze(a ) , squeeze(a ).numpy() ) )
        SCREAMING_SNAKE_CASE : List[str] = np.random.randn(1 , 4 , 1 , 5 )
        SCREAMING_SNAKE_CASE : Any = tf.constant(a )
        self.assertTrue(np.allclose(squeeze(a , axis=2 ) , squeeze(a , axis=2 ).numpy() ) )

    @require_flax
    def __UpperCamelCase ( self : int ) -> Dict:
        """squeeze() on JAX arrays should match the numpy path."""
        SCREAMING_SNAKE_CASE : Dict = np.random.randn(1 , 3 , 4 )
        SCREAMING_SNAKE_CASE : List[str] = jnp.array(a )
        self.assertTrue(np.allclose(squeeze(a ) , np.asarray(squeeze(a ) ) ) )
        SCREAMING_SNAKE_CASE : int = np.random.randn(1 , 4 , 1 , 5 )
        SCREAMING_SNAKE_CASE : str = jnp.array(a )
        self.assertTrue(np.allclose(squeeze(a , axis=2 ) , np.asarray(squeeze(a , axis=2 ) ) ) )

    def __UpperCamelCase ( self : str ) -> List[Any]:
        """expand_dims() should agree with numpy.expand_dims."""
        SCREAMING_SNAKE_CASE : List[str] = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , np.expand_dims(a , axis=1 ) ) )

    @require_torch
    def __UpperCamelCase ( self : Optional[int] ) -> Any:
        """expand_dims() on torch tensors should match the numpy path."""
        SCREAMING_SNAKE_CASE : str = np.random.randn(3 , 4 )
        SCREAMING_SNAKE_CASE : Tuple = torch.tensor(a )
        self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , expand_dims(a , axis=1 ).numpy() ) )

    @require_tf
    def __UpperCamelCase ( self : Optional[int] ) -> Dict:
        """expand_dims() on TF tensors should match the numpy path."""
        SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randn(3 , 4 )
        SCREAMING_SNAKE_CASE : Dict = tf.constant(a )
        self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , expand_dims(a , axis=1 ).numpy() ) )

    @require_flax
    def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
        """expand_dims() on JAX arrays should match the numpy path."""
        SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randn(3 , 4 )
        SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.array(a )
        self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , np.asarray(expand_dims(a , axis=1 ) ) ) )
76
"""Breadth-first search on an unweighted graph: shortest path and its length.

Restores the names the obfuscation destroyed: the ``__main__`` block already
calls ``bfs_shortest_path(demo_graph, ...)`` and
``bfs_shortest_path_distance(demo_graph, ...)``, and both functions declared
duplicate ``_a`` parameters (a SyntaxError).
"""

demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list:
    """Return the shortest path from `start` to `goal`, or [] if none exists.

    Paths are explored in BFS order, so the first path reaching `goal` is
    shortest in edge count.
    """
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        # NOTE: list.pop(0) is O(n); a collections.deque would be O(1),
        # kept as-is to preserve the original's behavior/ordering.
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the number of edges on the shortest start->target path, or -1."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
76
1
"""Send a message to a Slack incoming webhook.

Restores the real function name (the ``__main__`` block already calls
``send_slack_message``) and distinct parameter names (the obfuscated source
declared ``_a`` twice, a SyntaxError).
"""
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    """POST `message_body` as JSON to the webhook at `slack_url`.

    Raises:
        ValueError: if Slack responds with a non-200 status code.
    """
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
76
"""Slow integration test: TF mT5-small loss on a tiny input must match a
pinned reference value.

NOTE(review): identifiers are machine-mangled (``TFAutoModelForSeqaSeqLM`` is
presumably ``TFAutoModelForSeq2SeqLM``; locals are assigned to
``SCREAMING_SNAKE_CASE`` but read back as ``tokenizer``/``model``/``a``/
``mtf_score``/``EXPECTED_SCORE``). Code preserved byte-for-byte.
"""
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import tensorflow as tf

    from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM


@require_tf
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase ( unittest.TestCase ):
    """Numerical regression test for google/mt5-small under TensorFlow."""

    @slow
    def __UpperCamelCase ( self : str ) -> List[str]:
        """Compare the model's mean token loss against a hard-coded expectation."""
        SCREAMING_SNAKE_CASE : Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" )
        SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained("google/mt5-small" )
        SCREAMING_SNAKE_CASE : Tuple = tokenizer("Hello there" , return_tensors="tf" ).input_ids
        SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer("Hi I am" , return_tensors="tf" ).input_ids
        SCREAMING_SNAKE_CASE : str = model(a , labels=a ).loss
        SCREAMING_SNAKE_CASE : Any = -tf.math.reduce_mean(a ).numpy()
        # Reference value; tolerance below is 2e-4.
        SCREAMING_SNAKE_CASE : Union[str, Any] = -21.22_8168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
76
1
"""Fast (tokenizers-backed) tokenizer for ConvBERT.

NOTE(review): this file is machine-mangled — the module constants are all
assigned to ``a_`` but the class reads ``VOCAB_FILES_NAMES`` /
``PRETRAINED_VOCAB_FILES_MAP`` etc., the base class ``__A`` is undefined
(presumably ``PreTrainedTokenizerFast``, which is imported), and parameters
were collapsed to duplicate ``a`` names (a SyntaxError). Code preserved
byte-for-byte; only comments/docstrings added.
"""
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer


a_ = logging.get_logger(__name__)

a_ = {'vocab_file': 'vocab.txt'}

a_ = {
    'vocab_file': {
        'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
        'YituTech/conv-bert-medium-small': (
            'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
        ),
        'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
    }
}

a_ = {
    'YituTech/conv-bert-base': 512,
    'YituTech/conv-bert-medium-small': 512,
    'YituTech/conv-bert-small': 512,
}

a_ = {
    'YituTech/conv-bert-base': {'do_lower_case': True},
    'YituTech/conv-bert-medium-small': {'do_lower_case': True},
    'YituTech/conv-bert-small': {'do_lower_case': True},
}


class _UpperCamelCase ( __A ):
    """BERT-style fast tokenizer wired to ConvBERT vocab files."""

    lowerCamelCase__ =VOCAB_FILES_NAMES
    lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP
    lowerCamelCase__ =PRETRAINED_INIT_CONFIGURATION
    lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCamelCase__ =ConvBertTokenizer

    def __init__( self : List[str] , a : Union[str, Any]=None , a : Optional[int]=None , a : int=True , a : Tuple="[UNK]" , a : Dict="[SEP]" , a : Dict="[PAD]" , a : List[Any]="[CLS]" , a : Tuple="[MASK]" , a : Dict=True , a : Optional[Any]=None , **a : str , ) -> Dict:
        """Build the fast tokenizer and re-sync the backend normalizer options."""
        super().__init__(
            a , tokenizer_file=a , do_lower_case=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , tokenize_chinese_chars=a , strip_accents=a , **a , )
        SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        # Rebuild the normalizer only if any option disagrees with the saved state.
        if (
            normalizer_state.get("lowercase" , a ) != do_lower_case
            or normalizer_state.get("strip_accents" , a ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , a ) != tokenize_chinese_chars
        ):
            SCREAMING_SNAKE_CASE : List[str] = getattr(a , normalizer_state.pop("type" ) )
            SCREAMING_SNAKE_CASE : Optional[Any] = do_lower_case
            SCREAMING_SNAKE_CASE : Any = strip_accents
            SCREAMING_SNAKE_CASE : Optional[int] = tokenize_chinese_chars
            SCREAMING_SNAKE_CASE : List[str] = normalizer_class(**a )
        SCREAMING_SNAKE_CASE : str = do_lower_case

    def __UpperCamelCase ( self : Union[str, Any] , a : List[Any] , a : int=None ) -> Optional[Any]:
        """Add [CLS]/[SEP] special tokens around one or two sequences."""
        SCREAMING_SNAKE_CASE : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output

    def __UpperCamelCase ( self : Dict , a : List[int] , a : Optional[List[int]] = None ) -> List[int]:
        """Token-type ids for a BERT-style pair: 0s for the first segment, 1s for the second.

        NOTE(review): due to mangling, both segments read `token_ids_a`; the
        original almost certainly distinguished token_ids_0 / token_ids_1.
        """
        SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id]
        SCREAMING_SNAKE_CASE : Union[str, Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def __UpperCamelCase ( self : Tuple , a : str , a : Optional[str] = None ) -> Tuple[str]:
        """Save the backend vocabulary files to a directory."""
        SCREAMING_SNAKE_CASE : Dict = self._tokenizer.model.save(a , name=a )
        return tuple(a )
76
"""Binomial distribution point probability: P(X = k) for X ~ Binomial(n, p).

Restores the real function name (the ``__main__`` block already calls
``binomial_distribution``) and distinct parameter names (the obfuscated
source declared ``_a`` three times, a SyntaxError).
"""
from math import factorial


def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return C(trials, successes) * prob**successes * (1-prob)**(trials-successes).

    Raises:
        ValueError: if successes > trials, either count is negative or
            non-integer, or prob is not strictly between 0 and 1.
    """
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trails")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
76
1
"""Lazy-import __init__ for the speech-encoder-decoder model family.

NOTE(review): names are machine-mangled — every dict/list below is assigned
to ``a_`` although ``_LazyModule`` at the bottom reads ``_import_structure``.
Code preserved byte-for-byte; only comments added.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


# Maps submodule name -> public symbols it provides.
a_ = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}

# Register the PyTorch model only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ = ['SpeechEncoderDecoderModel']

# Register the Flax model only when flax is installed.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ = ['FlaxSpeechEncoderDecoderModel']

if TYPE_CHECKING:
    # Eager imports for static type checkers only.
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
76
"""Minimal custom fast tokenizer used in transformers' custom-pipeline tests.

Fix: the obfuscated source inherited from the undefined name ``__A``; the
only plausible base in scope is the imported ``BertTokenizerFast``. The class
attribute name follows the transformers convention linking a fast tokenizer
to its slow counterpart (``slow_tokenizer_class``) — mangled in the original.
"""
from transformers import BertTokenizerFast

from .custom_tokenization import CustomTokenizer


class _UpperCamelCase(BertTokenizerFast):
    """Fast tokenizer whose matching slow tokenizer is CustomTokenizer."""

    slow_tokenizer_class = CustomTokenizer
    pass
76
1
"""Simple, compound and APR interest calculators.

Restores real names: the third function's body already calls
``compound_interest`` by name, and every function declared duplicate ``_a``
parameters (a SyntaxError) while reading the descriptive names below.
"""
from __future__ import annotations


def simple_interest(
    principal: float, daily_interest_rate: float, days_between_payments: float
) -> float:
    """Return principal * rate * days (simple interest over the period).

    Raises:
        ValueError: if days_between_payments <= 0, daily_interest_rate < 0,
            or principal <= 0.
    """
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    """Return the interest earned: P * ((1 + r)^n - 1).

    Raises:
        ValueError: on non-positive periods/principal or negative rate.
    """
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    """Return interest at an APR compounded daily over `number_of_years`.

    Raises:
        ValueError: on non-positive years/principal or negative rate.
    """
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    # Daily compounding: rate/365 per period, 365 periods per year.
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
76
"""Distributed RAG retriever built on Ray actors.

NOTE(review): identifiers are machine-mangled — locals assign to
``SCREAMING_SNAKE_CASE`` and are read back via names no assignment defines
(``doc_ids``, ``retrieved_doc_embeds``, ``retrieval_workers``, ...), several
methods declare duplicate ``a`` parameters (a SyntaxError), the second class's
base ``__A`` is undefined (presumably ``RagRetriever``), and the tuple-target
annotated assignments (``X ,X : Dict = ...``) are not valid Python. Code
preserved byte-for-byte; only comments/docstrings added.
"""
import logging
import random

import ray

from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex


a_ = logging.getLogger(__name__)


class _UpperCamelCase :
    """Per-Ray-worker wrapper holding one RagRetriever instance."""

    def __init__( self : Any ) -> str:
        """Start uninitialized; the retriever is created lazily by the driver."""
        SCREAMING_SNAKE_CASE : Dict = False

    def __UpperCamelCase ( self : str , a : str , a : Optional[int] , a : Any , a : str ) -> List[Any]:
        """Create this worker's RagRetriever exactly once (idempotent)."""
        if not self.initialized:
            SCREAMING_SNAKE_CASE : List[str] = RagRetriever(
                a , question_encoder_tokenizer=a , generator_tokenizer=a , index=a , init_retrieval=a , )
            SCREAMING_SNAKE_CASE : Optional[int] = True

    def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
        """Load/initialize the retrieval index on this worker."""
        self.retriever.index.init_index()

    def __UpperCamelCase ( self : Optional[Any] , a : List[Any] , a : Any ) -> int:
        """Run retrieval on this worker and return (doc_ids, retrieved_doc_embeds)."""
        SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Dict = self.retriever._main_retrieve(a , a )
        return doc_ids, retrieved_doc_embeds


class _UpperCamelCase ( __A ):
    """RagRetriever variant that fans retrieval out to remote Ray workers."""

    def __init__( self : Tuple , a : Any , a : Tuple , a : Tuple , a : Tuple , a : List[Any]=None ) -> Optional[int]:
        """Wire up the retriever and create one RagRetriever per Ray worker."""
        # A pre-initialized in-process index is incompatible with Ray workers.
        if index is not None and index.is_initialized() and len(a ) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py " )
        super().__init__(
            a , question_encoder_tokenizer=a , generator_tokenizer=a , index=a , init_retrieval=a , )
        SCREAMING_SNAKE_CASE : Optional[Any] = retrieval_workers
        if len(self.retrieval_workers ) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(a , a , a , a )
                    for worker in self.retrieval_workers
                ] )

    def __UpperCamelCase ( self : Any ) -> Dict:
        """Initialize the index on every worker, or locally when there are none."""
        logger.info("initializing retrieval" )
        if len(self.retrieval_workers ) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def __UpperCamelCase ( self : Tuple , a : Optional[int] , a : Any ) -> int:
        """Retrieve via a randomly chosen worker (load balancing) or locally."""
        if len(self.retrieval_workers ) > 0:
            # Select a random retrieval actor.
            SCREAMING_SNAKE_CASE : Optional[Any] = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
            SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : str = ray.get(random_worker.retrieve.remote(a , a ) )
        else:
            SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Any = self._main_retrieve(a , a )
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(a )

    @classmethod
    def __UpperCamelCase ( cls : str , a : Optional[Any] , a : Any=None , **a : List[Any] ) -> str:
        """Delegate tokenizer loading to the parent class."""
        return super(a , cls ).get_tokenizers(a , a , **a )

    @classmethod
    def __UpperCamelCase ( cls : Union[str, Any] , a : int , a : Any , a : List[Any]=None , **a : Optional[Any] ) -> str:
        """Construct the retriever from a pretrained checkpoint (+ optional custom index)."""
        SCREAMING_SNAKE_CASE : str = kwargs.pop("config" , a ) or RagConfig.from_pretrained(a , **a )
        SCREAMING_SNAKE_CASE : List[Any] = RagTokenizer.from_pretrained(a , config=a )
        SCREAMING_SNAKE_CASE : List[Any] = rag_tokenizer.question_encoder
        SCREAMING_SNAKE_CASE : List[Any] = rag_tokenizer.generator
        if indexed_dataset is not None:
            SCREAMING_SNAKE_CASE : str = "custom"
            SCREAMING_SNAKE_CASE : List[Any] = CustomHFIndex(config.retrieval_vector_size , a )
        else:
            SCREAMING_SNAKE_CASE : List[str] = cls._build_index(a )
        return cls(
            a , question_encoder_tokenizer=a , generator_tokenizer=a , retrieval_workers=a , index=a , )
76
1
"""LayoutLMv3 model configuration and its ONNX export configuration.

NOTE(review): identifiers are machine-mangled — module constants are assigned
to ``a_``, the base class ``__A`` is undefined (the first class presumably
extends the imported ``PretrainedConfig``, the second ``OnnxConfig``), both
classes share the mangled name ``_UpperCamelCase`` (the second shadows the
first), ``__init__`` declares duplicate ``a`` parameters (a SyntaxError), and
attribute assignments target ``SCREAMING_SNAKE_CASE`` instead of ``self.*``.
Code preserved byte-for-byte; only comments/docstrings added.
"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging


if TYPE_CHECKING:
    from ...processing_utils import ProcessorMixin
    from ...utils import TensorType


a_ = logging.get_logger(__name__)

a_ = {
    'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}


class _UpperCamelCase ( __A ):
    """Configuration holding text + 2D-layout + visual-patch hyperparameters."""

    # model_type identifier (mangled attribute name).
    lowerCamelCase__ ='layoutlmv3'

    def __init__( self : List[str] , a : Union[str, Any]=5_0265 , a : Optional[int]=768 , a : Optional[Any]=12 , a : Any=12 , a : int=3072 , a : Any="gelu" , a : List[Any]=0.1 , a : Dict=0.1 , a : Optional[int]=512 , a : Optional[Any]=2 , a : Union[str, Any]=0.02 , a : Optional[int]=1e-5 , a : Tuple=1 , a : Dict=0 , a : List[Any]=2 , a : Optional[Any]=1024 , a : Optional[int]=128 , a : Any=128 , a : Union[str, Any]=True , a : int=32 , a : int=128 , a : Dict=64 , a : List[str]=256 , a : Tuple=True , a : int=True , a : str=True , a : Optional[int]=224 , a : int=3 , a : Tuple=16 , a : Dict=None , **a : str , ) -> Dict:
        """Forward transformer hyperparameters to the base config, then record
        LayoutLMv3-specific layout/visual settings."""
        super().__init__(
            vocab_size=a , hidden_size=a , num_hidden_layers=a , num_attention_heads=a , intermediate_size=a , hidden_act=a , hidden_dropout_prob=a , attention_probs_dropout_prob=a , max_position_embeddings=a , type_vocab_size=a , initializer_range=a , layer_norm_eps=a , pad_token_id=a , bos_token_id=a , eos_token_id=a , **a , )
        SCREAMING_SNAKE_CASE : Union[str, Any] = max_ad_position_embeddings
        SCREAMING_SNAKE_CASE : Tuple = coordinate_size
        SCREAMING_SNAKE_CASE : Dict = shape_size
        SCREAMING_SNAKE_CASE : str = has_relative_attention_bias
        SCREAMING_SNAKE_CASE : List[str] = rel_pos_bins
        SCREAMING_SNAKE_CASE : Tuple = max_rel_pos
        SCREAMING_SNAKE_CASE : int = has_spatial_attention_bias
        SCREAMING_SNAKE_CASE : Dict = rel_ad_pos_bins
        SCREAMING_SNAKE_CASE : int = max_rel_ad_pos
        SCREAMING_SNAKE_CASE : Optional[int] = text_embed
        SCREAMING_SNAKE_CASE : str = visual_embed
        SCREAMING_SNAKE_CASE : Union[str, Any] = input_size
        SCREAMING_SNAKE_CASE : int = num_channels
        SCREAMING_SNAKE_CASE : List[Any] = patch_size
        SCREAMING_SNAKE_CASE : Optional[int] = classifier_dropout


class _UpperCamelCase ( __A ):
    """ONNX export configuration for LayoutLMv3 (opset floor 1.12)."""

    lowerCamelCase__ =version.parse('1.12' )

    @property
    def __UpperCamelCase ( self : str ) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis layout of the model inputs, per task family."""
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ] )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ] )

    @property
    def __UpperCamelCase ( self : Tuple ) -> float:
        """Absolute tolerance used when validating exported outputs."""
        return 1e-5

    @property
    def __UpperCamelCase ( self : List[Any] ) -> int:
        """Default ONNX opset."""
        return 12

    def __UpperCamelCase ( self : List[str] , a : "ProcessorMixin" , a : int = -1 , a : int = -1 , a : bool = False , a : Optional["TensorType"] = None , a : int = 3 , a : int = 40 , a : int = 40 , ) -> Mapping[str, Any]:
        """Build dummy (text, boxes, image) inputs via the processor for export tracing."""
        # OCR must be off so the dummy words/boxes below are used as-is.
        setattr(processor.image_processor , "apply_ocr" , a )
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        SCREAMING_SNAKE_CASE : List[str] = compute_effective_axis_dimension(
            a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        SCREAMING_SNAKE_CASE : Dict = processor.tokenizer.num_special_tokens_to_add(a )
        SCREAMING_SNAKE_CASE : List[Any] = compute_effective_axis_dimension(
            a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=a )
        # Generate dummy inputs according to compute batch and sequence
        SCREAMING_SNAKE_CASE : Tuple = [[" ".join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        SCREAMING_SNAKE_CASE : Dict = [[[48, 84, 73, 128]]] * batch_size
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        SCREAMING_SNAKE_CASE : Union[str, Any] = self._generate_dummy_images(a , a , a , a )
        SCREAMING_SNAKE_CASE : Any = dict(
            processor(
                a , text=a , boxes=a , return_tensors=a , ) )
        return inputs
76
from typing import Any


class Node:
    """A single element of a singly linked list."""

    def __init__(self, data: Any) -> None:
        self.data = data  # payload carried by this node
        self.next: "Node | None" = None  # link to the following node, None at the tail

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    """Singly linked list supporting indexing, insertion, deletion and in-place reversal."""

    def __init__(self) -> None:
        self.head: "Node | None" = None

    def __iter__(self) -> Any:
        """Yield the *data* of each node from head to tail."""
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        """Number of nodes, counted by walking the list (O(n))."""
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        """Return the data stored at *index* (0-based)."""
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, item in enumerate(self):
            if i == index:
                return item
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        """Overwrite the data stored at *index* (0-based)."""
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        """Append *data* at the end of the list."""
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        """Prepend *data* at the front of the list."""
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        """Insert *data* so that it becomes the element at position *index*.

        Raises:
            IndexError: if index is outside [0, len(self)].
        """
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        """Remove and return the first element."""
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        """Remove and return the last element."""
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        """Remove and return the element at position *index*.

        Raises:
            IndexError: if the index does not address an existing node.
        """
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        """Reverse the list in place by re-pointing each node's `next` link."""
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev


def test_singly_linked_list() -> None:
    """Exercise every LinkedList operation with integer payloads."""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))
    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))


def test_singly_linked_list_2() -> None:
    """Exercise the list with heterogeneous payloads (strings, floats, Nodes, None)."""
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list)
        == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list)
        == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list)
        == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list)
        == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main() -> None:
    """Interactive demo: build a list from user input and show each operation."""
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
76
1
def nor_gate(input_1: int, input_2: int) -> int:
    """Return the logical NOR of the two inputs: 1 only when both are 0.

    >>> nor_gate(0, 0)
    1
    >>> nor_gate(0, 1)
    0
    >>> nor_gate(1, 0)
    0
    >>> nor_gate(1, 1)
    0
    """
    # NOR is true exactly when neither input is asserted.
    return int(input_1 == input_2 == 0)


def main() -> None:
    """Print the full truth table of the NOR gate."""
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
76
import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger

a_ = get_logger(__name__)
logger = a_  # conventional alias used below


class VerificationMode(enum.Enum):
    """Which verification checks to run on downloaded/processed data."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Base error raised during checksum verification of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """A downloaded file's checksum didn't match the expected checksum."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None) -> None:
    """Compare recorded download checksums against the expected ones.

    Args:
        expected_checksums: mapping url -> expected checksum info, or None to skip.
        recorded_checksums: mapping url -> recorded checksum info.
        verification_name: optional label used in log/error messages.

    Raises:
        ExpectedMoreDownloadedFiles: some expected urls were never downloaded.
        UnexpectedDownloadedFile: some downloaded urls were not expected.
        NonMatchingChecksumError: at least one file's checksum differs.
    """
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Base error raised during splits verification."""


class UnexpectedSplits(SplitsVerificationException):
    """Some recorded splits were not expected."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some expected splits are missing from the recorded ones."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The recorded split sizes don't match the expected split sizes."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict) -> None:
    """Compare recorded split infos against the expected ones.

    Raises:
        ExpectedMoreSplits / UnexpectedSplits: split-name sets differ.
        NonMatchingSplitsSizesError: `num_examples` differs for some split.
    """
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Return the size and (optionally) sha256 checksum of the file at *path*.

    The file is hashed in 1 MiB chunks to keep memory bounded.
    """
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size) -> bool:
    """True iff *dataset_size* is truthy and below `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
76
1
from typing import List

import datasets
from datasets.tasks import AudioClassification

from ..folder_based_builder import folder_based_builder

a_ = datasets.utils.logging.get_logger(__name__)
logger = a_  # conventional alias


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""

    # NOTE(review): annotated bool but defaulted to None — tri-state
    # (None = auto-decide); matches the obfuscated original's defaults.
    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    """Dataset builder that loads audio files from a folder layout."""

    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


# Extensions recognized as audio files (lowercase, with leading dot).
AUDIO_EXTENSIONS = [
    ".aiff",
    ".au",
    ".avr",
    ".caf",
    ".flac",
    ".htk",
    ".svx",
    ".mat4",
    ".mat5",
    ".mpc2k",
    ".ogg",
    ".paf",
    ".pvf",
    ".raw",
    ".rf64",
    ".sd2",
    ".sds",
    ".ircam",
    ".voc",
    ".w64",
    ".wav",
    ".nist",
    ".wavex",
    ".wve",
    ".xi",
    ".mp3",
    ".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
76
import argparse
import json

import numpy
import torch

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging

logging.set_verbosity_info()


def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    """Convert an official XLM checkpoint into HF weights/config/vocab files.

    Args:
        xlm_checkpoint_path: path to the original XLM `.pth` checkpoint.
        pytorch_dump_folder_path: output folder for the converted files.
    """
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    # Drop tensor-valued params: only JSON-serializable entries go in the config.
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    # BPE convention: continuation pieces keep no marker, word-final pieces get "</w>";
    # the first 14 ids are special tokens and are left untouched.
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")


if __name__ == "__main__":
    a_ = argparse.ArgumentParser()
    # Required parameters
    a_.add_argument(
        "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    a_.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = a_.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
76
1
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin

enable_full_determinism()


class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast (CPU-sized) tests for the Self-Attention-Guidance pipeline."""

    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        """Build tiny randomly-initialized components for the pipeline."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs for a single tiny generation."""
        if str(device).startswith("mps"):
            # mps does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU tests against real pretrained checkpoints."""

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt],
            width=768,
            height=512,
            generator=generator,
            guidance_scale=7.5,
            sag_scale=1.0,
            num_inference_steps=20,
            output_type="np",
        )
        image = output.images

        assert image.shape == (1, 512, 768, 3)
76
def manhattan_distance(point_a: list, point_b: list) -> float:
    """Return the Manhattan (L1) distance between two n-dimensional points.

    >>> manhattan_distance([1, 1], [2, 2])
    2.0

    Raises:
        TypeError: if either point is not a list of numbers.
        ValueError: on empty input or mismatched dimensions.
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    """Raise TypeError/ValueError unless *point* is a non-empty list of numbers."""
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """Same as :func:`manhattan_distance`, expressed as a single generator sum."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
76
1
import argparse
import json

import numpy
import torch

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging

logging.set_verbosity_info()


def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    """Convert an official XLM checkpoint into HF weights/config/vocab files.

    Args:
        xlm_checkpoint_path: path to the original XLM `.pth` checkpoint.
        pytorch_dump_folder_path: output folder for the converted files.
    """
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    # Drop tensor-valued params: only JSON-serializable entries go in the config.
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    # BPE convention: continuation pieces keep no marker, word-final pieces get "</w>";
    # the first 14 ids are special tokens and are left untouched.
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")


if __name__ == "__main__":
    a_ = argparse.ArgumentParser()
    # Required parameters
    a_.add_argument(
        "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    a_.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = a_.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
76
from ...configuration_utils import PretrainedConfig
from ...utils import logging

a_ = logging.get_logger(__name__)
logger = a_  # conventional alias

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    """Configuration class storing the hyper-parameters of a ViT-MSN model.

    All arguments have defaults matching the base ViT-MSN architecture.
    """

    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
76
1
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig from transformers.utils import logging logging.set_verbosity_info() a_ = logging.get_logger(__name__) def lowerCamelCase__ ( _a): # initialize config if "resnet-50" in model_name: SCREAMING_SNAKE_CASE : int = ResNetConfig.from_pretrained("microsoft/resnet-50") elif "resnet-101" in model_name: SCREAMING_SNAKE_CASE : int = ResNetConfig.from_pretrained("microsoft/resnet-101") else: raise ValueError("Model name should include either resnet50 or resnet101") SCREAMING_SNAKE_CASE : str = DetrConfig(use_timm_backbone=_a , backbone_config=_a) # set label attributes SCREAMING_SNAKE_CASE : List[str] = "panoptic" in model_name if is_panoptic: SCREAMING_SNAKE_CASE : Union[str, Any] = 250 else: SCREAMING_SNAKE_CASE : Union[str, Any] = 91 SCREAMING_SNAKE_CASE : str = "huggingface/label-files" SCREAMING_SNAKE_CASE : Union[str, Any] = "coco-detection-id2label.json" SCREAMING_SNAKE_CASE : Optional[Any] = json.load(open(hf_hub_download(_a , _a , repo_type="dataset") , "r")) SCREAMING_SNAKE_CASE : int = {int(_a): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : List[Any] = idalabel SCREAMING_SNAKE_CASE : List[Any] = {v: k for k, v in idalabel.items()} return config, is_panoptic def lowerCamelCase__ ( _a): # here we list all keys to be renamed (original name on the left, our name on the right) SCREAMING_SNAKE_CASE : Union[str, Any] = [] # stem # fmt: off rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight")) rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight")) rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias")) 
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean")) rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var")) # stages for stage_idx in range(len(config.backbone_config.depths)): for layer_idx in range(config.backbone_config.depths[stage_idx]): # shortcut if layer_idx == 0: rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight", )) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight", )) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias", )) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean", )) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var", )) # 3 convs for i in range(3): rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight", )) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight", )) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias", 
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias", )) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean", )) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var", )) # fmt: on for i in range(config.encoder_layers): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( ( f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight", )) rename_keys.append( (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias")) rename_keys.append( (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")) rename_keys.append( (f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias")) rename_keys.append( (f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias")) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( ( f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", 
f"decoder.layers.{i}.self_attn.out_proj.weight", )) rename_keys.append( (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")) rename_keys.append( ( f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight", f"decoder.layers.{i}.encoder_attn.out_proj.weight", )) rename_keys.append( ( f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias", f"decoder.layers.{i}.encoder_attn.out_proj.bias", )) rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")) rename_keys.append( (f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")) rename_keys.append( (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias")) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("input_proj.weight", "input_projection.weight"), ("input_proj.bias", "input_projection.bias"), ("query_embed.weight", "query_position_embeddings.weight"), ("transformer.decoder.norm.weight", "decoder.layernorm.weight"), ("transformer.decoder.norm.bias", "decoder.layernorm.bias"), 
("class_embed.weight", "class_labels_classifier.weight"), ("class_embed.bias", "class_labels_classifier.bias"), ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"), ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"), ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"), ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"), ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"), ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"), ]) return rename_keys def lowerCamelCase__ ( _a , _a , _a): SCREAMING_SNAKE_CASE : str = state_dict.pop(_a) SCREAMING_SNAKE_CASE : int = val def lowerCamelCase__ ( _a , _a=False): SCREAMING_SNAKE_CASE : Optional[Any] = "" if is_panoptic: SCREAMING_SNAKE_CASE : Optional[int] = "detr." # first: transformer encoder for i in range(6): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) SCREAMING_SNAKE_CASE : List[str] = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight") SCREAMING_SNAKE_CASE : Optional[int] = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias") # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight[:256, :] SCREAMING_SNAKE_CASE : int = in_proj_bias[:256] SCREAMING_SNAKE_CASE : Tuple = in_proj_weight[256:512, :] SCREAMING_SNAKE_CASE : List[Any] = in_proj_bias[256:512] SCREAMING_SNAKE_CASE : str = in_proj_weight[-256:, :] SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6): # read in weights + bias of input projection layer of self-attention SCREAMING_SNAKE_CASE : List[str] = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight") SCREAMING_SNAKE_CASE : str = 
def prepare_img():
    """Download the standard COCO val2017 test image used to sanity-check the conversion.

    The obfuscated original referenced an undefined name ``_a`` in the
    ``requests.get`` call; the URL is the local constant it replaced.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert an original facebookresearch/detr checkpoint into HF format.

    Loads the original model from torch hub, renames/splits its weights to the
    HuggingFace DetrForObjectDetection / DetrForSegmentation layout, verifies
    both models produce matching outputs on a test image, and optionally saves
    the result and/or pushes it to the hub.

    Args:
        model_name: one of "detr-resnet-50" / "detr-resnet-101".
        pytorch_dump_folder_path: if given, save model + processor there.
        push_to_hub: if True, upload model + processor to the hub.
    """
    # ``get_detr_config`` is defined earlier in this file (outside this view);
    # it returns the HF config and whether this is the panoptic variant.
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load(
        "facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True
    ).eval()
    state_dict = detr.state_dict()

    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)

    # important: we need to prepend a prefix to each of the base model keys as
    # the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                # these panoptic heads keep their original names
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val

    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="detr-resnet-50",
        type=str,
        choices=["detr-resnet-50", "detr-resnet-101"],
        help="Name of the DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
    args = parser.parse_args()
    convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
76
import base64


def ascii85_encode(plaintext: str) -> bytes:
    """Encode a UTF-8 string as Ascii85 bytes.

    The obfuscated original imported the nonexistent module ``baseaa`` and
    called ``aaaencode`` — the digit-stripped forms of ``base64`` and
    ``a85encode``.

    >>> ascii85_encode("hello world")
    b'BOu!rD]j7BEbo7'
    >>> ascii85_encode("")
    b''
    """
    return base64.a85encode(plaintext.encode("utf-8"))


def ascii85_decode(a85_data) -> str:
    """Decode Ascii85 data (bytes or str) back to a UTF-8 string.

    >>> ascii85_decode(b'BOu!rD]j7BEbo7')
    'hello world'
    """
    return base64.a85decode(a85_data).decode("utf-8")


# Backward-compatible alias: in the obfuscated original both functions shared
# the name ``lowerCamelCase__``, so the module-level binding was the decoder
# (the second definition shadowed the first). Preserve that effective binding.
lowerCamelCase__ = ascii85_decode


if __name__ == "__main__":
    import doctest

    doctest.testmod()
76
1