code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
'''simple docstring''' import argparse from pathlib import Path import fairseq import torch from fairseq.models.xmod import XMODModel as FairseqXmodModel from packaging import version from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse('''0.12.2'''): raise Exception('''requires fairseq >= 0.12.2''') if version.parse(fairseq.__version__) > version.parse('''2'''): raise Exception('''requires fairseq < v2''') logging.set_verbosity_info() _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = '''Hello, World!''' _lowerCAmelCase = '''en_XX''' def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ): __UpperCamelCase : Union[str, Any] = Path("data_bin" ) __UpperCamelCase : Union[str, Any] = FairseqXmodModel.from_pretrained( model_name_or_path=str(Path(snake_case__ ).parent ) , checkpoint_file=Path(snake_case__ ).name , _name="xmod_base" , arch="xmod_base" , task="multilingual_masked_lm" , data_name_or_path=str(snake_case__ ) , bpe="sentencepiece" , sentencepiece_model=str(Path(snake_case__ ).parent / "sentencepiece.bpe.model" ) , src_dict=str(data_dir / "dict.txt" ) , ) xmod.eval() # disable dropout print(snake_case__ ) __UpperCamelCase : List[str] = xmod.model.encoder.sentence_encoder __UpperCamelCase : Optional[int] = XmodConfig( vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , 
ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , ) if classification_head: __UpperCamelCase : Any = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0] print("Our X-MOD config:" , snake_case__ ) __UpperCamelCase : Dict = XmodForSequenceClassification(snake_case__ ) if classification_head else XmodForMaskedLM(snake_case__ ) model.eval() # Now let's copy all the weights. # Embeddings __UpperCamelCase : List[Any] = xmod_sent_encoder.embed_tokens.weight __UpperCamelCase : List[Any] = xmod_sent_encoder.embed_positions.weight __UpperCamelCase : str = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them. __UpperCamelCase : Any = xmod_sent_encoder.layernorm_embedding.weight __UpperCamelCase : str = xmod_sent_encoder.layernorm_embedding.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer __UpperCamelCase : int = model.roberta.encoder.layer[i] __UpperCamelCase : Any = xmod_sent_encoder.layers[i] # self attention __UpperCamelCase : List[str] = layer.attention.self if not ( xmod_layer.self_attn.k_proj.weight.data.shape == xmod_layer.self_attn.q_proj.weight.data.shape == xmod_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ): raise AssertionError("Dimensions of self-attention weights do not match." 
) __UpperCamelCase : Dict = xmod_layer.self_attn.q_proj.weight __UpperCamelCase : Optional[Any] = xmod_layer.self_attn.q_proj.bias __UpperCamelCase : Any = xmod_layer.self_attn.k_proj.weight __UpperCamelCase : Tuple = xmod_layer.self_attn.k_proj.bias __UpperCamelCase : Union[str, Any] = xmod_layer.self_attn.v_proj.weight __UpperCamelCase : Any = xmod_layer.self_attn.v_proj.bias # self-attention output __UpperCamelCase : Optional[int] = layer.attention.output if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape: raise AssertionError("Dimensions of self-attention output weights do not match." ) __UpperCamelCase : Union[str, Any] = xmod_layer.self_attn.out_proj.weight __UpperCamelCase : str = xmod_layer.self_attn.out_proj.bias __UpperCamelCase : Dict = xmod_layer.self_attn_layer_norm.weight __UpperCamelCase : Any = xmod_layer.self_attn_layer_norm.bias # intermediate __UpperCamelCase : Dict = layer.intermediate if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError("Dimensions of intermediate weights do not match." ) __UpperCamelCase : List[Any] = xmod_layer.fca.weight __UpperCamelCase : Optional[int] = xmod_layer.fca.bias # output __UpperCamelCase : List[Any] = layer.output if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError("Dimensions of feed-forward weights do not match." ) __UpperCamelCase : Tuple = xmod_layer.fca.weight __UpperCamelCase : int = xmod_layer.fca.bias __UpperCamelCase : Dict = xmod_layer.final_layer_norm.weight __UpperCamelCase : int = xmod_layer.final_layer_norm.bias if bert_output.adapter_layer_norm is not None: __UpperCamelCase : Any = xmod_layer.adapter_layer_norm.weight __UpperCamelCase : int = xmod_layer.adapter_layer_norm.bias if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ): raise AssertionError("Lists of language adapters do not match." 
) for lang_code, adapter in xmod_layer.adapter_modules.items(): __UpperCamelCase : Any = bert_output.adapter_modules[lang_code] __UpperCamelCase : Dict = xmod_layer.adapter_modules[lang_code] __UpperCamelCase : int = from_adapter.fca.weight __UpperCamelCase : Dict = from_adapter.fca.bias __UpperCamelCase : List[Any] = from_adapter.fca.weight __UpperCamelCase : int = from_adapter.fca.bias # end of layer if xmod_sent_encoder.layer_norm is not None: __UpperCamelCase : Tuple = xmod_sent_encoder.layer_norm.weight __UpperCamelCase : List[Any] = xmod_sent_encoder.layer_norm.bias if classification_head: __UpperCamelCase : Optional[Any] = xmod.model.classification_heads["mnli"].dense.weight __UpperCamelCase : Any = xmod.model.classification_heads["mnli"].dense.bias __UpperCamelCase : Tuple = xmod.model.classification_heads["mnli"].out_proj.weight __UpperCamelCase : List[Any] = xmod.model.classification_heads["mnli"].out_proj.bias else: # LM Head __UpperCamelCase : Any = xmod.model.encoder.lm_head.dense.weight __UpperCamelCase : Optional[Any] = xmod.model.encoder.lm_head.dense.bias __UpperCamelCase : Tuple = xmod.model.encoder.lm_head.layer_norm.weight __UpperCamelCase : List[Any] = xmod.model.encoder.lm_head.layer_norm.bias __UpperCamelCase : Tuple = xmod.model.encoder.lm_head.weight __UpperCamelCase : Any = xmod.model.encoder.lm_head.bias # Let's check that we get the same results. 
__UpperCamelCase : Any = xmod.encode(snake_case__ ).unsqueeze(0 ) # batch of size 1 model.roberta.set_default_language(snake_case__ ) __UpperCamelCase : Optional[Any] = model(snake_case__ )[0] if classification_head: __UpperCamelCase : int = xmod.model.classification_heads["mnli"](xmod.extract_features(snake_case__ ) ) else: __UpperCamelCase : Optional[Any] = xmod.model(snake_case__ , lang_id=[SAMPLE_LANGUAGE] )[0] print(our_output.shape , their_output.shape ) __UpperCamelCase : Dict = torch.max(torch.abs(our_output - their_output ) ).item() print(F"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7 __UpperCamelCase : Union[str, Any] = torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) print("Do both models output the same tensors?" , "🔥" if success else "💩" ) if not success: raise Exception("Something went wRoNg" ) Path(snake_case__ ).mkdir(parents=snake_case__ , exist_ok=snake_case__ ) print(F"Saving model to {pytorch_dump_folder_path}" ) model.save_pretrained(snake_case__ ) if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.''' ) _lowerCAmelCase = parser.parse_args() convert_xmod_checkpoint_to_pytorch( args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
298
'''simple docstring''' import argparse from pathlib import Path import fairseq import torch from fairseq.models.xmod import XMODModel as FairseqXmodModel from packaging import version from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse('''0.12.2'''): raise Exception('''requires fairseq >= 0.12.2''') if version.parse(fairseq.__version__) > version.parse('''2'''): raise Exception('''requires fairseq < v2''') logging.set_verbosity_info() _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = '''Hello, World!''' _lowerCAmelCase = '''en_XX''' def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ): __UpperCamelCase : Union[str, Any] = Path("data_bin" ) __UpperCamelCase : Union[str, Any] = FairseqXmodModel.from_pretrained( model_name_or_path=str(Path(snake_case__ ).parent ) , checkpoint_file=Path(snake_case__ ).name , _name="xmod_base" , arch="xmod_base" , task="multilingual_masked_lm" , data_name_or_path=str(snake_case__ ) , bpe="sentencepiece" , sentencepiece_model=str(Path(snake_case__ ).parent / "sentencepiece.bpe.model" ) , src_dict=str(data_dir / "dict.txt" ) , ) xmod.eval() # disable dropout print(snake_case__ ) __UpperCamelCase : List[str] = xmod.model.encoder.sentence_encoder __UpperCamelCase : Optional[int] = XmodConfig( vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , 
ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , ) if classification_head: __UpperCamelCase : Any = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0] print("Our X-MOD config:" , snake_case__ ) __UpperCamelCase : Dict = XmodForSequenceClassification(snake_case__ ) if classification_head else XmodForMaskedLM(snake_case__ ) model.eval() # Now let's copy all the weights. # Embeddings __UpperCamelCase : List[Any] = xmod_sent_encoder.embed_tokens.weight __UpperCamelCase : List[Any] = xmod_sent_encoder.embed_positions.weight __UpperCamelCase : str = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them. __UpperCamelCase : Any = xmod_sent_encoder.layernorm_embedding.weight __UpperCamelCase : str = xmod_sent_encoder.layernorm_embedding.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer __UpperCamelCase : int = model.roberta.encoder.layer[i] __UpperCamelCase : Any = xmod_sent_encoder.layers[i] # self attention __UpperCamelCase : List[str] = layer.attention.self if not ( xmod_layer.self_attn.k_proj.weight.data.shape == xmod_layer.self_attn.q_proj.weight.data.shape == xmod_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ): raise AssertionError("Dimensions of self-attention weights do not match." 
) __UpperCamelCase : Dict = xmod_layer.self_attn.q_proj.weight __UpperCamelCase : Optional[Any] = xmod_layer.self_attn.q_proj.bias __UpperCamelCase : Any = xmod_layer.self_attn.k_proj.weight __UpperCamelCase : Tuple = xmod_layer.self_attn.k_proj.bias __UpperCamelCase : Union[str, Any] = xmod_layer.self_attn.v_proj.weight __UpperCamelCase : Any = xmod_layer.self_attn.v_proj.bias # self-attention output __UpperCamelCase : Optional[int] = layer.attention.output if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape: raise AssertionError("Dimensions of self-attention output weights do not match." ) __UpperCamelCase : Union[str, Any] = xmod_layer.self_attn.out_proj.weight __UpperCamelCase : str = xmod_layer.self_attn.out_proj.bias __UpperCamelCase : Dict = xmod_layer.self_attn_layer_norm.weight __UpperCamelCase : Any = xmod_layer.self_attn_layer_norm.bias # intermediate __UpperCamelCase : Dict = layer.intermediate if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError("Dimensions of intermediate weights do not match." ) __UpperCamelCase : List[Any] = xmod_layer.fca.weight __UpperCamelCase : Optional[int] = xmod_layer.fca.bias # output __UpperCamelCase : List[Any] = layer.output if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError("Dimensions of feed-forward weights do not match." ) __UpperCamelCase : Tuple = xmod_layer.fca.weight __UpperCamelCase : int = xmod_layer.fca.bias __UpperCamelCase : Dict = xmod_layer.final_layer_norm.weight __UpperCamelCase : int = xmod_layer.final_layer_norm.bias if bert_output.adapter_layer_norm is not None: __UpperCamelCase : Any = xmod_layer.adapter_layer_norm.weight __UpperCamelCase : int = xmod_layer.adapter_layer_norm.bias if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ): raise AssertionError("Lists of language adapters do not match." 
) for lang_code, adapter in xmod_layer.adapter_modules.items(): __UpperCamelCase : Any = bert_output.adapter_modules[lang_code] __UpperCamelCase : Dict = xmod_layer.adapter_modules[lang_code] __UpperCamelCase : int = from_adapter.fca.weight __UpperCamelCase : Dict = from_adapter.fca.bias __UpperCamelCase : List[Any] = from_adapter.fca.weight __UpperCamelCase : int = from_adapter.fca.bias # end of layer if xmod_sent_encoder.layer_norm is not None: __UpperCamelCase : Tuple = xmod_sent_encoder.layer_norm.weight __UpperCamelCase : List[Any] = xmod_sent_encoder.layer_norm.bias if classification_head: __UpperCamelCase : Optional[Any] = xmod.model.classification_heads["mnli"].dense.weight __UpperCamelCase : Any = xmod.model.classification_heads["mnli"].dense.bias __UpperCamelCase : Tuple = xmod.model.classification_heads["mnli"].out_proj.weight __UpperCamelCase : List[Any] = xmod.model.classification_heads["mnli"].out_proj.bias else: # LM Head __UpperCamelCase : Any = xmod.model.encoder.lm_head.dense.weight __UpperCamelCase : Optional[Any] = xmod.model.encoder.lm_head.dense.bias __UpperCamelCase : Tuple = xmod.model.encoder.lm_head.layer_norm.weight __UpperCamelCase : List[Any] = xmod.model.encoder.lm_head.layer_norm.bias __UpperCamelCase : Tuple = xmod.model.encoder.lm_head.weight __UpperCamelCase : Any = xmod.model.encoder.lm_head.bias # Let's check that we get the same results. 
__UpperCamelCase : Any = xmod.encode(snake_case__ ).unsqueeze(0 ) # batch of size 1 model.roberta.set_default_language(snake_case__ ) __UpperCamelCase : Optional[Any] = model(snake_case__ )[0] if classification_head: __UpperCamelCase : int = xmod.model.classification_heads["mnli"](xmod.extract_features(snake_case__ ) ) else: __UpperCamelCase : Optional[Any] = xmod.model(snake_case__ , lang_id=[SAMPLE_LANGUAGE] )[0] print(our_output.shape , their_output.shape ) __UpperCamelCase : Dict = torch.max(torch.abs(our_output - their_output ) ).item() print(F"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7 __UpperCamelCase : Union[str, Any] = torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) print("Do both models output the same tensors?" , "🔥" if success else "💩" ) if not success: raise Exception("Something went wRoNg" ) Path(snake_case__ ).mkdir(parents=snake_case__ , exist_ok=snake_case__ ) print(F"Saving model to {pytorch_dump_folder_path}" ) model.save_pretrained(snake_case__ ) if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.''' ) _lowerCAmelCase = parser.parse_args() convert_xmod_checkpoint_to_pytorch( args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
298
1
'''simple docstring''' import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin _lowerCAmelCase = ''' Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning. In March 2021, Hugging Face raised $40 million in a Series B funding round.[3] On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5] ''' class A ( unittest.TestCase , SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def a_ (self ) -> Any: __UpperCamelCase : int = load_tool("text-question-answering" ) self.tool.setup() __UpperCamelCase : Any = load_tool("text-question-answering" , remote=_UpperCAmelCase ) def a_ (self ) -> Dict: __UpperCamelCase : Optional[Any] = self.tool(_UpperCAmelCase , "What did Hugging Face do in April 2021?" ) self.assertEqual(_UpperCAmelCase , "launched the BigScience Research Workshop" ) def a_ (self ) -> List[Any]: __UpperCamelCase : Dict = self.remote_tool(_UpperCAmelCase , "What did Hugging Face do in April 2021?" ) self.assertEqual(_UpperCAmelCase , "launched the BigScience Research Workshop" ) def a_ (self ) -> str: __UpperCamelCase : Union[str, Any] = self.tool(text=_UpperCAmelCase , question="What did Hugging Face do in April 2021?" ) self.assertEqual(_UpperCAmelCase , "launched the BigScience Research Workshop" ) def a_ (self ) -> Tuple: __UpperCamelCase : Optional[Any] = self.remote_tool(text=_UpperCAmelCase , question="What did Hugging Face do in April 2021?" ) self.assertEqual(_UpperCAmelCase , "launched the BigScience Research Workshop" )
298
'''simple docstring''' def __lowerCAmelCase ( snake_case__ ): return [ txt[:a] + txt[a].upper() + txt[a + 1 :] for a in range(len(snake_case__ ) ) if txt[a].isalpha() ] if __name__ == "__main__": __import__('''doctest''').testmod()
298
1
'''simple docstring''' import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Image from .base import TaskTemplate @dataclass(frozen=SCREAMING_SNAKE_CASE__ ) class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' A = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} ) A = Features({"image": Image()} ) A = Features({"labels": ClassLabel} ) A = "image" A = "labels" def a_ (self , _UpperCAmelCase ) -> Any: if self.label_column not in features: raise ValueError(f"Column {self.label_column} is not present in features." ) if not isinstance(features[self.label_column] , _UpperCAmelCase ): raise ValueError(f"Column {self.label_column} is not a ClassLabel." ) __UpperCamelCase : Dict = copy.deepcopy(self ) __UpperCamelCase : Tuple = self.label_schema.copy() __UpperCamelCase : Tuple = features[self.label_column] __UpperCamelCase : Optional[Any] = label_schema return task_template @property def a_ (self ) -> Dict[str, str]: return { self.image_column: "image", self.label_column: "labels", }
298
'''simple docstring''' def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ): def count_of_possible_combinations(snake_case__ ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(snake_case__ ) def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ): def count_of_possible_combinations_with_dp_array( snake_case__ , snake_case__ ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] __UpperCamelCase : Any = sum( count_of_possible_combinations_with_dp_array(target - item , snake_case__ ) for item in array ) __UpperCamelCase : List[str] = answer return answer __UpperCamelCase : Optional[int] = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(snake_case__ , snake_case__ ) def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ): __UpperCamelCase : Optional[int] = [0] * (target + 1) __UpperCamelCase : Tuple = 1 for i in range(1 , target + 1 ): for j in range(snake_case__ ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() _lowerCAmelCase = 3 _lowerCAmelCase = 5 _lowerCAmelCase = [1, 2, 5] print(combination_sum_iv(n, array, target))
298
1
'''simple docstring''' import argparse from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) parser.add_argument( '''--original_config_file''', type=str, required=True, help='''The YAML config file corresponding to the original architecture.''', ) parser.add_argument( '''--num_in_channels''', default=None, type=int, help='''The number of input channels. If `None` number of input channels will be automatically inferred.''', ) parser.add_argument( '''--image_size''', default=512, type=int, help=( '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2''' ''' Base. Use 768 for Stable Diffusion v2.''' ), ) parser.add_argument( '''--extract_ema''', action='''store_true''', help=( '''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights''' ''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield''' ''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.''' ), ) parser.add_argument( '''--upcast_attention''', action='''store_true''', help=( '''Whether the attention computation should always be upcasted. 
This is necessary when running stable''' ''' diffusion 2.1.''' ), ) parser.add_argument( '''--from_safetensors''', action='''store_true''', help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''', ) parser.add_argument( '''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.''', ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''') def __lowerCAmelCase ( snake_case__ ): if string == "True": return True elif string == "False": return False else: raise ValueError(F"could not parse string as bool {string}" ) parser.add_argument( '''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool ) parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int) _lowerCAmelCase = parser.parse_args() _lowerCAmelCase = download_controlnet_from_original_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, extract_ema=args.extract_ema, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, use_linear_projection=args.use_linear_projection, cross_attention_dim=args.cross_attention_dim, ) controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
298
'''simple docstring''' # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. _lowerCAmelCase = abspath(join(dirname(dirname(dirname(__file__))), '''src''')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='''ignore''', category=FutureWarning) def __lowerCAmelCase ( snake_case__ ): from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(snake_case__ ) def __lowerCAmelCase ( snake_case__ ): from transformers.testing_utils import pytest_terminal_summary_main __UpperCamelCase : int = terminalreporter.config.getoption("--make-reports" ) if make_reports: pytest_terminal_summary_main(snake_case__ , id=snake_case__ )
298
1
'''simple docstring''' import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEmbeddings, BertLayer, BertPooler, BertPreTrainedModel, ) def __lowerCAmelCase ( snake_case__ ): __UpperCamelCase : Tuple = torch.exp(snake_case__ ) __UpperCamelCase : str = torch.sum(snake_case__ , dim=1 ) # sum of exp(x_i) __UpperCamelCase : int = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i) return torch.log(snake_case__ ) - B / A class A ( nn.Module ): '''simple docstring''' def __init__(self , _UpperCAmelCase ) -> Union[str, Any]: super().__init__() __UpperCamelCase : Any = config.output_attentions __UpperCamelCase : Dict = config.output_hidden_states __UpperCamelCase : Union[str, Any] = nn.ModuleList([BertLayer(_UpperCAmelCase ) for _ in range(config.num_hidden_layers )] ) __UpperCamelCase : Tuple = nn.ModuleList([BertHighway(_UpperCAmelCase ) for _ in range(config.num_hidden_layers )] ) __UpperCamelCase : Optional[int] = [-1 for _ in range(config.num_hidden_layers )] def a_ (self , _UpperCAmelCase ) -> int: if (type(_UpperCAmelCase ) is float) or (type(_UpperCAmelCase ) is int): for i in range(len(self.early_exit_entropy ) ): __UpperCamelCase : str = x else: __UpperCamelCase : List[Any] = x def a_ (self , _UpperCAmelCase ) -> str: __UpperCamelCase : Tuple = pooler.state_dict() for highway in self.highway: for name, param in highway.pooler.state_dict().items(): param.copy_(loaded_model[name] ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ) -> List[Any]: __UpperCamelCase : Optional[Any] = () __UpperCamelCase : Tuple = () __UpperCamelCase : Dict = () for i, layer_module in enumerate(self.layer ): if self.output_hidden_states: __UpperCamelCase : Tuple = all_hidden_states + 
(hidden_states,) __UpperCamelCase : Optional[int] = layer_module( _UpperCAmelCase , _UpperCAmelCase , head_mask[i] , _UpperCAmelCase , _UpperCAmelCase ) __UpperCamelCase : Tuple = layer_outputs[0] if self.output_attentions: __UpperCamelCase : Optional[Any] = all_attentions + (layer_outputs[1],) __UpperCamelCase : Any = (hidden_states,) if self.output_hidden_states: __UpperCamelCase : Any = current_outputs + (all_hidden_states,) if self.output_attentions: __UpperCamelCase : int = current_outputs + (all_attentions,) __UpperCamelCase : Optional[int] = self.highway[i](_UpperCAmelCase ) # logits, pooled_output if not self.training: __UpperCamelCase : Dict = highway_exit[0] __UpperCamelCase : Any = entropy(_UpperCAmelCase ) __UpperCamelCase : str = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy __UpperCamelCase : Optional[Any] = all_highway_exits + (highway_exit,) if highway_entropy < self.early_exit_entropy[i]: __UpperCamelCase : str = (highway_logits,) + current_outputs[1:] + (all_highway_exits,) raise HighwayException(_UpperCAmelCase , i + 1 ) else: __UpperCamelCase : Optional[int] = all_highway_exits + (highway_exit,) # Add last layer if self.output_hidden_states: __UpperCamelCase : int = all_hidden_states + (hidden_states,) __UpperCamelCase : Dict = (hidden_states,) if self.output_hidden_states: __UpperCamelCase : Union[str, Any] = outputs + (all_hidden_states,) if self.output_attentions: __UpperCamelCase : Optional[int] = outputs + (all_attentions,) __UpperCamelCase : List[Any] = outputs + (all_highway_exits,) return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits @add_start_docstrings( "The Bert Model transformer with early exiting (DeeBERT). 
" , SCREAMING_SNAKE_CASE__ , ) class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__(self , _UpperCAmelCase ) -> Dict: super().__init__(_UpperCAmelCase ) __UpperCamelCase : Union[str, Any] = config __UpperCamelCase : Dict = BertEmbeddings(_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = DeeBertEncoder(_UpperCAmelCase ) __UpperCamelCase : str = BertPooler(_UpperCAmelCase ) self.init_weights() def a_ (self ) -> Any: self.encoder.init_highway_pooler(self.pooler ) def a_ (self ) -> Optional[int]: return self.embeddings.word_embeddings def a_ (self , _UpperCAmelCase ) -> Dict: __UpperCamelCase : int = value def a_ (self , _UpperCAmelCase ) -> Tuple: for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(_UpperCAmelCase ) @add_start_docstrings_to_model_forward(_UpperCAmelCase ) def a_ (self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ) -> Union[str, Any]: if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" ) elif input_ids is not None: __UpperCamelCase : Tuple = input_ids.size() elif inputs_embeds is not None: __UpperCamelCase : Optional[int] = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds" ) __UpperCamelCase : List[str] = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: __UpperCamelCase : int = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase ) if encoder_attention_mask is None: __UpperCamelCase : Tuple = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase ) if token_type_ids is None: __UpperCamelCase : Optional[Any] = torch.zeros(_UpperCAmelCase , dtype=torch.long , device=_UpperCAmelCase ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, 
to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. __UpperCamelCase : torch.Tensor = self.get_extended_attention_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_attention_mask.dim() == 3: __UpperCamelCase : Tuple = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.dim() == 2: __UpperCamelCase : Any = encoder_attention_mask[:, None, None, :] __UpperCamelCase : List[Any] = encoder_extended_attention_mask.to( dtype=next(self.parameters() ).dtype ) # fp16 compatibility __UpperCamelCase : Dict = (1.0 - encoder_extended_attention_mask) * -10_000.0 # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] __UpperCamelCase : Dict = self.get_head_mask(_UpperCAmelCase , self.config.num_hidden_layers ) __UpperCamelCase : Optional[int] = self.embeddings( input_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase ) __UpperCamelCase : List[Any] = self.encoder( _UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , ) __UpperCamelCase : Union[str, Any] = encoder_outputs[0] __UpperCamelCase : Any = self.pooler(_UpperCAmelCase ) __UpperCamelCase : Union[str, Any] = ( sequence_output, pooled_output, ) + encoder_outputs[ 1: ] # add hidden_states and attentions if they are here return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__(self , 
_UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]: __UpperCamelCase : Tuple = message __UpperCamelCase : Union[str, Any] = exit_layer # start from 1! class A ( nn.Module ): '''simple docstring''' def __init__(self , _UpperCAmelCase ) -> Dict: super().__init__() __UpperCamelCase : Union[str, Any] = BertPooler(_UpperCAmelCase ) __UpperCamelCase : int = nn.Dropout(config.hidden_dropout_prob ) __UpperCamelCase : Union[str, Any] = nn.Linear(config.hidden_size , config.num_labels ) def a_ (self , _UpperCAmelCase ) -> Any: # Pooler __UpperCamelCase : Optional[int] = encoder_outputs[0] __UpperCamelCase : str = self.pooler(_UpperCAmelCase ) # "return" pooler_output # BertModel __UpperCamelCase : Tuple = (pooler_input, pooler_output) + encoder_outputs[1:] # "return" bmodel_output # Dropout and classification __UpperCamelCase : Dict = bmodel_output[1] __UpperCamelCase : List[Any] = self.dropout(_UpperCAmelCase ) __UpperCamelCase : Any = self.classifier(_UpperCAmelCase ) return logits, pooled_output @add_start_docstrings( "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. 
" , SCREAMING_SNAKE_CASE__ , ) class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__(self , _UpperCAmelCase ) -> Any: super().__init__(_UpperCAmelCase ) __UpperCamelCase : List[Any] = config.num_labels __UpperCamelCase : List[Any] = config.num_hidden_layers __UpperCamelCase : Optional[int] = DeeBertModel(_UpperCAmelCase ) __UpperCamelCase : List[str] = nn.Dropout(config.hidden_dropout_prob ) __UpperCamelCase : str = nn.Linear(config.hidden_size , self.config.num_labels ) self.init_weights() @add_start_docstrings_to_model_forward(_UpperCAmelCase ) def a_ (self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=-1 , _UpperCAmelCase=False , ) -> int: __UpperCamelCase : int = self.num_layers try: __UpperCamelCase : Tuple = self.bert( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , head_mask=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase , ) # sequence_output, pooled_output, (hidden_states), (attentions), highway exits __UpperCamelCase : str = outputs[1] __UpperCamelCase : List[Any] = self.dropout(_UpperCAmelCase ) __UpperCamelCase : Dict = self.classifier(_UpperCAmelCase ) __UpperCamelCase : Tuple = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: __UpperCamelCase : int = e.message __UpperCamelCase : Optional[Any] = e.exit_layer __UpperCamelCase : Optional[int] = outputs[0] if not self.training: __UpperCamelCase : Optional[int] = entropy(_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = [] __UpperCamelCase : Any = [] if labels is not None: if self.num_labels == 1: # We are doing regression __UpperCamelCase : List[str] = MSELoss() __UpperCamelCase : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: __UpperCamelCase : Dict = CrossEntropyLoss() __UpperCamelCase : Any = loss_fct(logits.view(-1 
, self.num_labels ) , labels.view(-1 ) ) # work with highway exits __UpperCamelCase : List[Any] = [] for highway_exit in outputs[-1]: __UpperCamelCase : Union[str, Any] = highway_exit[0] if not self.training: highway_logits_all.append(_UpperCAmelCase ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression __UpperCamelCase : Union[str, Any] = MSELoss() __UpperCamelCase : str = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: __UpperCamelCase : Optional[Any] = CrossEntropyLoss() __UpperCamelCase : List[str] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(_UpperCAmelCase ) if train_highway: __UpperCamelCase : int = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: __UpperCamelCase : Dict = (loss,) + outputs if not self.training: __UpperCamelCase : Optional[int] = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: __UpperCamelCase : int = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
298
"""Tests for the BridgeTower image processor."""
import unittest
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import BridgeTowerImageProcessor


class BridgeTowerImageProcessingTester(unittest.TestCase):
    """Holds the configuration used by the image-processor tests and computes expected output sizes."""

    def __init__(
        self,
        parent,
        do_resize=True,
        size=None,
        size_divisor=32,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        do_center_crop=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_pad=True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor is expected to produce.

        Mirrors the resize logic: scale the shorter edge to ``size``, cap the
        longer edge at ``size * 1333/800``, then floor both to a multiple of
        ``size_divisor``. For batched inputs, returns the per-axis maxima.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True
        )
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True
        )
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True
        )
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
298
1
"""Tokenization classes for the CamemBERT model (SentencePiece-based, fairseq-compatible)."""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizer(PreTrainedTokenizer):
    """CamemBERT tokenizer backed by SentencePiece.

    Reproduces fairseq's special-token layout: four fairseq-reserved ids are
    prepended before the SentencePiece vocabulary, and ``<mask>`` is appended
    after it; ``fairseq_offset`` shifts every SentencePiece id accordingly.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs: ``<s> A </s>`` or ``<s> A </s></s> B </s>``."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """CamemBERT does not use token type ids; the mask is all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        # fairseq-reserved tokens + SentencePiece pieces (<mask> sits at the
        # very end, at index len(sp_model) + fairseq_offset).
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        # SentencePieceProcessor is not picklable; drop it and reload on setstate.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
298
"""Convert an original Bort (GluonNLP) checkpoint into a PyTorch/Transformers checkpoint."""
import argparse
import os

import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn

from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.utils import logging


if version.parse(nlp.__version__) != version.parse("0.8.3"):
    raise Exception("requires gluonnlp == 0.8.3")

if version.parse(mx.__version__) != version.parse("1.5.0"):
    raise Exception("requires mxnet == 1.5.0")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"


def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Rebuild the original GluonNLP Bort model, copy its parameters into a
    ``BertForMaskedLM``, save it under ``pytorch_dump_folder_path``, and verify
    that both models produce the same output on a sample sentence.
    """
    # Original Bort configuration (bort_4_8_768_1024)
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()

    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()

    # Parameter mapping (Gluon -> Transformers), * denotes the layer index:
    #   encoder.layer_norm.{beta,gamma}          -> bert.embeddings.LayerNorm.{bias,weight}
    #   encoder.position_weight                  -> bert.embeddings.position_embeddings.weight
    #   word_embed.0.weight                      -> bert.embeddings.word_embeddings.weight
    #   encoder.transformer_cells.*.attention_cell.proj_{key,query,value}.{bias,weight}
    #                                            -> bert.encoder.layer.*.attention.self.{key,query,value}.{bias,weight}
    #   encoder.transformer_cells.*.proj.{bias,weight}
    #                                            -> bert.encoder.layer.*.attention.output.dense.{bias,weight}
    #   encoder.transformer_cells.*.layer_norm.{beta,gamma}
    #                                            -> bert.encoder.layer.*.attention.output.LayerNorm.{bias,weight}
    #   encoder.transformer_cells.*.ffn.ffn_1.{bias,weight}
    #                                            -> bert.encoder.layer.*.intermediate.dense.{bias,weight}
    #   encoder.transformer_cells.*.ffn.ffn_2.{bias,weight}
    #                                            -> bert.encoder.layer.*.output.dense.{bias,weight}
    #   encoder.transformer_cells.*.ffn.layer_norm.{beta,gamma}
    #                                            -> bert.encoder.layer.*.output.LayerNorm.{bias,weight}

    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )

    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )

    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both model do output the same tensors")
    else:
        print("❌ Both model do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
298
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class A : '''simple docstring''' A = XGLMConfig A = {} A = "gelu" def __init__(self , _UpperCAmelCase , _UpperCAmelCase=1_4 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=9_9 , _UpperCAmelCase=3_2 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=3_7 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=5_1_2 , _UpperCAmelCase=0.02 , ) -> Optional[Any]: __UpperCamelCase : List[Any] = parent __UpperCamelCase : List[Any] = batch_size __UpperCamelCase : Optional[Any] = seq_length __UpperCamelCase : List[Any] = is_training __UpperCamelCase : Optional[Any] = use_input_mask __UpperCamelCase : Tuple = use_labels __UpperCamelCase : int = vocab_size __UpperCamelCase : Tuple = d_model __UpperCamelCase : List[str] = num_hidden_layers __UpperCamelCase : Optional[Any] = num_attention_heads __UpperCamelCase : List[str] = ffn_dim __UpperCamelCase : Union[str, Any] = activation_function __UpperCamelCase : Union[str, Any] = activation_dropout __UpperCamelCase : Optional[int] = attention_dropout __UpperCamelCase : Union[str, Any] = max_position_embeddings __UpperCamelCase : Dict = initializer_range __UpperCamelCase : Tuple = None __UpperCamelCase : Optional[Any] = 0 __UpperCamelCase : Union[str, Any] = 2 __UpperCamelCase : Optional[int] = 1 def a_ (self ) -> str: return 
XGLMConfig.from_pretrained("facebook/xglm-564M" ) def a_ (self ) -> Any: __UpperCamelCase : Optional[int] = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) __UpperCamelCase : Tuple = None if self.use_input_mask: __UpperCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCamelCase : int = self.get_config() __UpperCamelCase : int = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def a_ (self ) -> Dict: return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=_UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=_UpperCAmelCase , ) def a_ (self ) -> str: __UpperCamelCase : Optional[Any] = self.prepare_config_and_inputs() ( ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ) : str = config_and_inputs __UpperCamelCase : Union[str, Any] = { "input_ids": input_ids, "head_mask": head_mask, } return config, inputs_dict @require_tf class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): '''simple docstring''' A = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () A = (TFXGLMForCausalLM,) if is_tf_available() else () A = ( {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {} ) A = False A = False A = False def a_ (self ) -> Optional[Any]: __UpperCamelCase : Any = TFXGLMModelTester(self ) __UpperCamelCase : List[str] = ConfigTester(self , 
config_class=_UpperCAmelCase , n_embd=3_7 ) def a_ (self ) -> Optional[int]: self.config_tester.run_common_tests() @slow def a_ (self ) -> Optional[int]: for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCamelCase : List[Any] = TFXGLMModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor." ) def a_ (self ) -> Optional[int]: super().test_resize_token_embeddings() @require_tf class A ( unittest.TestCase ): '''simple docstring''' @slow def a_ (self , _UpperCAmelCase=True ) -> Dict: __UpperCamelCase : Dict = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) __UpperCamelCase : Tuple = tf.convert_to_tensor([[2, 2_6_8, 9_8_6_5]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off __UpperCamelCase : int = [2, 2_6_8, 9_8_6_5, 6_7, 1_1, 1_9_8_8, 5_7_2_5_2, 9_8_6_5, 5, 9_8_4, 6_7, 1_9_8_8, 2_1_3_8_3_8, 1_6_5_8, 5_3, 7_0_4_4_6, 3_3, 6_6_5_7, 2_7_8, 1_5_8_1] # fmt: on __UpperCamelCase : Union[str, Any] = model.generate(_UpperCAmelCase , do_sample=_UpperCAmelCase , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , _UpperCAmelCase ) @slow def a_ (self ) -> Optional[Any]: __UpperCamelCase : int = XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) __UpperCamelCase : Any = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) tf.random.set_seed(0 ) __UpperCamelCase : Dict = tokenizer("Today is a nice day and" , return_tensors="tf" ) __UpperCamelCase : Optional[Any] = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(":/CPU:0" ): __UpperCamelCase : str = model.generate(_UpperCAmelCase , do_sample=_UpperCAmelCase , seed=[7, 0] ) __UpperCamelCase : List[str] = tokenizer.decode(output_ids[0] , 
skip_special_tokens=_UpperCAmelCase ) __UpperCamelCase : List[Any] = ( "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due" ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) @slow def a_ (self ) -> Dict: __UpperCamelCase : Union[str, Any] = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) __UpperCamelCase : Optional[Any] = XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) __UpperCamelCase : List[Any] = "left" # use different length sentences to test batching __UpperCamelCase : Tuple = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. When", "Hello, my dog is a little", ] __UpperCamelCase : Tuple = tokenizer(_UpperCAmelCase , return_tensors="tf" , padding=_UpperCAmelCase ) __UpperCamelCase : Dict = inputs["input_ids"] __UpperCamelCase : Any = model.generate(input_ids=_UpperCAmelCase , attention_mask=inputs["attention_mask"] , max_new_tokens=1_2 ) __UpperCamelCase : Dict = tokenizer(sentences[0] , return_tensors="tf" ).input_ids __UpperCamelCase : Optional[Any] = model.generate(input_ids=_UpperCAmelCase , max_new_tokens=1_2 ) __UpperCamelCase : Union[str, Any] = tokenizer(sentences[1] , return_tensors="tf" ).input_ids __UpperCamelCase : str = model.generate(input_ids=_UpperCAmelCase , max_new_tokens=1_2 ) __UpperCamelCase : Optional[Any] = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase ) __UpperCamelCase : List[Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_UpperCAmelCase ) __UpperCamelCase : Any = tokenizer.decode(output_padded[0] , skip_special_tokens=_UpperCAmelCase ) __UpperCamelCase : str = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. 
The output for the sequence below should be the same " "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be " "a single", "Hello, my dog is a little bit of a shy one, but he is very friendly", ] self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , [non_padded_sentence, padded_sentence] )
298
"""Tests for `datasets.BeamBasedBuilder` (download_and_prepare, sharding, nested features)."""

import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import datasets
import datasets.config

from .utils import require_beam


class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with a single string feature."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            # No input/target split for this dummy dataset.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with a nested (Sequence-of-dict) feature."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    """Return (key, example) pairs for the flat dummy dataset."""
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    """Return (key, example) pairs for the nested dummy dataset."""
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                # Force two output shards while keeping the real writer behavior.
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            # Both shards of the sharded train split must exist.
            # (Fixed: the second assertion previously re-checked shard 00000.)
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            # Without a beam_runner the builder must refuse to run.
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
298
1
"""Learning-rate schedulers with warmup, plus a string-keyed factory (`get_scheduler`).

Every `get_*_schedule*` function returns a `torch.optim.lr_scheduler.LambdaLR`
whose lambda multiplies the optimizer's initial lr.
"""

import math
from enum import Enum
from typing import Optional, Union

from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR

from .utils import logging


logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    """Scheduler names accepted by `get_scheduler`."""

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1) -> LambdaLR:
    """Constant learning rate, equal to the lr set in the optimizer.

    Args:
        optimizer: The optimizer to schedule.
        last_epoch: Index of the last epoch when resuming training.
    """
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1) -> LambdaLR:
    """Constant learning rate preceded by a linear warmup from 0 over `num_warmup_steps` steps."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1) -> LambdaLR:
    """Piecewise-constant learning rate.

    Args:
        step_rules: Rule string such as ``"1:10,0.1:20,0.01:30,0.005"``: multiply the lr
            by 1 for the first 10 steps, by 0.1 for the next 20 steps, by 0.01 for the
            next 30 steps, then by 0.005 for all remaining steps.
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    # All entries except the last are "multiplier:steps" pairs.
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    # The trailing bare value applies after the last boundary.
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        # Closure factory so the rule dict is captured by value, not re-read globals.
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1) -> LambdaLR:
    """Linear warmup from 0 to the initial lr, then linear decay to 0 at `num_training_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
) -> LambdaLR:
    """Linear warmup, then cosine decay to 0 following `num_cycles` half-waves (default: half a cosine)."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
) -> LambdaLR:
    """Linear warmup, then cosine decay with `num_cycles` hard restarts back to the initial lr."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        # The modulo restarts the cosine at the top of each cycle.
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
) -> LambdaLR:
    """Linear warmup, then polynomial decay (exponent `power`) from the initial lr down to `lr_end`.

    Raises:
        ValueError: If `lr_end` is not smaller than the optimizer's initial lr.
    """
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
) -> LambdaLR:
    """Unified factory: build any scheduler above from its `SchedulerType` name.

    Args:
        name: Scheduler name (string or `SchedulerType`).
        optimizer: The optimizer to schedule.
        step_rules: Only used by `PIECEWISE_CONSTANT`.
        num_warmup_steps: Required by all warmup schedules.
        num_training_steps: Required by all decaying schedules.
        num_cycles: Only used by `COSINE_WITH_RESTARTS`.
        power: Only used by `POLYNOMIAL`.
        last_epoch: Index of the last epoch when resuming training.

    Raises:
        ValueError: If a required argument for the chosen schedule is missing.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
298
"""`accelerate test` CLI command: runs the bundled sanity-check training script."""

# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

from accelerate.test_utils import execute_subprocess_async


def test_command_parser(subparsers=None):
    """Build the argument parser for `accelerate test`.

    When `subparsers` is given, the command is registered as a subcommand of the
    main `accelerate` CLI; otherwise a standalone parser is returned.
    """
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        # Dispatched by the top-level CLI: `args.func(args)` runs this command.
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    """Launch the bundled `test_script.py` through `accelerate-launch` to verify the setup."""
    # The test script lives two directories up from this module, under test_utils/scripts/.
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    """Standalone entry point: parse args and run the test command."""
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
298
1
'''simple docstring''' from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig _lowerCAmelCase = logging.get_logger(__name__) # General docstring _lowerCAmelCase = '''RegNetConfig''' # Base docstring _lowerCAmelCase = '''facebook/regnet-y-040''' _lowerCAmelCase = [1, 1088, 7, 7] # Image classification docstring _lowerCAmelCase = '''facebook/regnet-y-040''' _lowerCAmelCase = '''tabby, tabby cat''' _lowerCAmelCase = [ '''facebook/regnet-y-040''', # See all regnet models at https://huggingface.co/models?filter=regnet ] class A ( tf.keras.layers.Layer ): '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase = 3 , _UpperCAmelCase = 1 , _UpperCAmelCase = 1 , _UpperCAmelCase = "relu" , **_UpperCAmelCase , ) -> Optional[int]: super().__init__(**_UpperCAmelCase ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb __UpperCamelCase : List[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) __UpperCamelCase : Tuple = tf.keras.layers.ConvaD( filters=_UpperCAmelCase , kernel_size=_UpperCAmelCase , strides=_UpperCAmelCase , padding="VALID" , groups=_UpperCAmelCase , use_bias=_UpperCAmelCase , name="convolution" , ) __UpperCamelCase : int = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" ) __UpperCamelCase : List[str] = ACTaFN[activation] if activation is not None else tf.identity def a_ (self 
, _UpperCAmelCase ) -> Dict: __UpperCamelCase : str = self.convolution(self.padding(_UpperCAmelCase ) ) __UpperCamelCase : Dict = self.normalization(_UpperCAmelCase ) __UpperCamelCase : Dict = self.activation(_UpperCAmelCase ) return hidden_state class A ( tf.keras.layers.Layer ): '''simple docstring''' def __init__(self , _UpperCAmelCase , **_UpperCAmelCase ) -> Optional[Any]: super().__init__(**_UpperCAmelCase ) __UpperCamelCase : Any = config.num_channels __UpperCamelCase : str = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , ) def a_ (self , _UpperCAmelCase ) -> Tuple: __UpperCamelCase : Dict = shape_list(_UpperCAmelCase )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) __UpperCamelCase : Any = tf.transpose(_UpperCAmelCase , perm=(0, 2, 3, 1) ) __UpperCamelCase : List[Any] = self.embedder(_UpperCAmelCase ) return hidden_state class A ( tf.keras.layers.Layer ): '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase = 2 , **_UpperCAmelCase ) -> Any: super().__init__(**_UpperCAmelCase ) __UpperCamelCase : Any = tf.keras.layers.ConvaD( filters=_UpperCAmelCase , kernel_size=1 , strides=_UpperCAmelCase , use_bias=_UpperCAmelCase , name="convolution" ) __UpperCamelCase : Tuple = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase = False ) -> tf.Tensor: return self.normalization(self.convolution(_UpperCAmelCase ) , training=_UpperCAmelCase ) class A ( tf.keras.layers.Layer ): '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) -> Any: super().__init__(**_UpperCAmelCase ) __UpperCamelCase : List[str] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_UpperCAmelCase , name="pooler" ) __UpperCamelCase : Optional[Any] = [ tf.keras.layers.ConvaD(filters=_UpperCAmelCase , kernel_size=1 , activation="relu" , name="attention.0" ), tf.keras.layers.ConvaD(filters=_UpperCAmelCase , kernel_size=1 , activation="sigmoid" , name="attention.2" ), ] def a_ (self , _UpperCAmelCase ) -> Tuple: # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] __UpperCamelCase : List[str] = self.pooler(_UpperCAmelCase ) for layer_module in self.attention: __UpperCamelCase : str = layer_module(_UpperCAmelCase ) __UpperCamelCase : List[Any] = hidden_state * pooled return hidden_state class A ( tf.keras.layers.Layer ): '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1 , **_UpperCAmelCase ) -> int: super().__init__(**_UpperCAmelCase ) __UpperCamelCase : List[Any] = 
in_channels != out_channels or stride != 1 __UpperCamelCase : List[str] = max(1 , out_channels // config.groups_width ) __UpperCamelCase : List[Any] = ( TFRegNetShortCut(_UpperCAmelCase , stride=_UpperCAmelCase , name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" , name="shortcut" ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. __UpperCamelCase : Optional[Any] = [ TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ), TFRegNetConvLayer( _UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , activation=config.hidden_act , name="layer.1" ), TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase , name="layer.2" ), ] __UpperCamelCase : Dict = ACTaFN[config.hidden_act] def a_ (self , _UpperCAmelCase ) -> Union[str, Any]: __UpperCamelCase : List[Any] = hidden_state for layer_module in self.layers: __UpperCamelCase : Dict = layer_module(_UpperCAmelCase ) __UpperCamelCase : List[Any] = self.shortcut(_UpperCAmelCase ) hidden_state += residual __UpperCamelCase : Tuple = self.activation(_UpperCAmelCase ) return hidden_state class A ( tf.keras.layers.Layer ): '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1 , **_UpperCAmelCase ) -> Any: super().__init__(**_UpperCAmelCase ) __UpperCamelCase : str = in_channels != out_channels or stride != 1 __UpperCamelCase : Optional[int] = max(1 , out_channels // config.groups_width ) __UpperCamelCase : Union[str, Any] = ( TFRegNetShortCut(_UpperCAmelCase , stride=_UpperCAmelCase , name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" , name="shortcut" ) ) __UpperCamelCase : Union[str, Any] = [ TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ), TFRegNetConvLayer( _UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , 
activation=config.hidden_act , name="layer.1" ), TFRegNetSELayer(_UpperCAmelCase , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ), TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase , name="layer.3" ), ] __UpperCamelCase : Union[str, Any] = ACTaFN[config.hidden_act] def a_ (self , _UpperCAmelCase ) -> int: __UpperCamelCase : str = hidden_state for layer_module in self.layers: __UpperCamelCase : Any = layer_module(_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = self.shortcut(_UpperCAmelCase ) hidden_state += residual __UpperCamelCase : Union[str, Any] = self.activation(_UpperCAmelCase ) return hidden_state class A ( tf.keras.layers.Layer ): '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , **_UpperCAmelCase ) -> int: super().__init__(**_UpperCAmelCase ) __UpperCamelCase : List[str] = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer __UpperCamelCase : Tuple = [ # downsampling is done in the first layer with stride of 2 layer(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , name="layers.0" ), *[layer(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , name=f"layers.{i+1}" ) for i in range(depth - 1 )], ] def a_ (self , _UpperCAmelCase ) -> Any: for layer_module in self.layers: __UpperCamelCase : Dict = layer_module(_UpperCAmelCase ) return hidden_state class A ( tf.keras.layers.Layer ): '''simple docstring''' def __init__(self , _UpperCAmelCase , **_UpperCAmelCase ) -> str: super().__init__(**_UpperCAmelCase ) __UpperCamelCase : Dict = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( _UpperCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) ) __UpperCamelCase : Union[str, Any] = 
zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(_UpperCAmelCase , config.depths[1:] ) ): self.stages.append(TFRegNetStage(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , depth=_UpperCAmelCase , name=f"stages.{i+1}" ) ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase = False , _UpperCAmelCase = True ) -> TFBaseModelOutputWithNoAttention: __UpperCamelCase : List[Any] = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: __UpperCamelCase : Any = hidden_states + (hidden_state,) __UpperCamelCase : Any = stage_module(_UpperCAmelCase ) if output_hidden_states: __UpperCamelCase : List[Any] = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=_UpperCAmelCase , hidden_states=_UpperCAmelCase ) @keras_serializable class A ( tf.keras.layers.Layer ): '''simple docstring''' A = RegNetConfig def __init__(self , _UpperCAmelCase , **_UpperCAmelCase ) -> List[Any]: super().__init__(**_UpperCAmelCase ) __UpperCamelCase : Optional[int] = config __UpperCamelCase : List[Any] = TFRegNetEmbeddings(_UpperCAmelCase , name="embedder" ) __UpperCamelCase : Union[str, Any] = TFRegNetEncoder(_UpperCAmelCase , name="encoder" ) __UpperCamelCase : Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_UpperCAmelCase , name="pooler" ) @unpack_inputs def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention: __UpperCamelCase : Optional[int] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __UpperCamelCase : Dict = return_dict if return_dict is not None else self.config.use_return_dict __UpperCamelCase : Union[str, Any] = self.embedder(_UpperCAmelCase , training=_UpperCAmelCase ) __UpperCamelCase : str 
= self.encoder( _UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase , training=_UpperCAmelCase ) __UpperCamelCase : List[str] = encoder_outputs[0] __UpperCamelCase : Tuple = self.pooler(_UpperCAmelCase ) # Change to NCHW output format have uniformity in the modules __UpperCamelCase : List[str] = tf.transpose(_UpperCAmelCase , perm=(0, 3, 1, 2) ) __UpperCamelCase : List[Any] = tf.transpose(_UpperCAmelCase , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: __UpperCamelCase : List[str] = tuple([tf.transpose(_UpperCAmelCase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=_UpperCAmelCase , pooler_output=_UpperCAmelCase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' A = RegNetConfig A = "regnet" A = "pixel_values" @property def a_ (self ) -> List[Any]: return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )} _lowerCAmelCase = R''' Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. ''' _lowerCAmelCase = R''' Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. 
See [`ConveNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. ''' @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." , SCREAMING_SNAKE_CASE__ , ) class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__(self , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) -> Tuple: super().__init__(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = TFRegNetMainLayer(_UpperCAmelCase , name="regnet" ) @unpack_inputs @add_start_docstrings_to_model_forward(_UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]: __UpperCamelCase : List[str] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __UpperCamelCase : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict __UpperCamelCase : Tuple = self.regnet( pixel_values=_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase , training=_UpperCAmelCase , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , SCREAMING_SNAKE_CASE__ , ) class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__(self , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) -> int: super().__init__(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = config.num_labels __UpperCamelCase : Any = TFRegNetMainLayer(_UpperCAmelCase , name="regnet" ) # classification head __UpperCamelCase : List[str] = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(_UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def a_ (self , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: __UpperCamelCase : Dict = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __UpperCamelCase : str = return_dict if return_dict is not None else self.config.use_return_dict __UpperCamelCase : Dict = self.regnet( _UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase , training=_UpperCAmelCase ) __UpperCamelCase : Union[str, Any] = outputs.pooler_output if return_dict else outputs[1] __UpperCamelCase : List[str] = self.classifier[0](_UpperCAmelCase ) __UpperCamelCase : Optional[int] = self.classifier[1](_UpperCAmelCase ) __UpperCamelCase : str = None if labels is None else self.hf_compute_loss(labels=_UpperCAmelCase , logits=_UpperCAmelCase ) if not return_dict: __UpperCamelCase : Union[str, Any] = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=_UpperCAmelCase , 
logits=_UpperCAmelCase , hidden_states=outputs.hidden_states )
298
"""Unit tests for the BlenderbotSmall tokenizer (machine-mangled copy).

NOTE(review): an obfuscation tool renamed identifiers inconsistently: the class
is `A` and inherits from the undefined `SCREAMING_SNAKE_CASE__` (presumably
`TokenizerTesterMixin` -- confirm), both class attributes rebind the same name
`A`, every test method is named `a_`, and method bodies assign to
`__UpperCamelCase` / take `_UpperCAmelCase` while later statements read the
original local names (`tokenizer`, `tokens`, `src_text`, ...). As written the
module cannot run; only formatting and comments were changed here.
"""
import json
import os
import unittest

from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
    VOCAB_FILES_NAMES,
    BlenderbotSmallTokenizer,
)

from ...test_tokenization_common import TokenizerTesterMixin


class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    """Test case exercising `BlenderbotSmallTokenizer` BPE behaviour."""

    A = BlenderbotSmallTokenizer  # tokenizer_class in the un-mangled original
    A = False  # rebinds `A` again; presumably `test_rust_tokenizer` -- confirm

    def a_ (self ) -> List[str]:
        # setUp: write a tiny JSON vocab and BPE merges file into the tmp dir.
        super().setUp()
        __UpperCamelCase : Optional[Any] = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        __UpperCamelCase : int = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
        __UpperCamelCase : Any = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        __UpperCamelCase : int = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
        __UpperCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        __UpperCamelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(_UpperCAmelCase ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(_UpperCAmelCase ) )

    def a_ (self , **_UpperCAmelCase ) -> Dict:
        # Factory: load a tokenizer back from the files written in setUp.
        kwargs.update(self.special_tokens_map )
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )

    def a_ (self , _UpperCAmelCase ) -> str:
        # Input/expected-output pair consumed by the common tokenizer tests.
        __UpperCamelCase : List[Any] = "adapt act apte"
        __UpperCamelCase : Dict = "adapt act apte"
        return input_text, output_text

    def a_ (self ) -> int:
        # BPE splits "apte" into "ap@@" + "te"; ids follow vocab order 0..5.
        __UpperCamelCase : List[str] = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        __UpperCamelCase : str = "adapt act apte"
        __UpperCamelCase : List[str] = ["adapt", "act", "ap@@", "te"]
        __UpperCamelCase : Union[str, Any] = tokenizer.tokenize(_UpperCAmelCase )
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
        __UpperCamelCase : Dict = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        __UpperCamelCase : Any = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )

    def a_ (self ) -> int:
        # Round-trip against the published facebook/blenderbot-90M checkpoint
        # (requires network): decoding lower-cases and re-spaces the text.
        __UpperCamelCase : Optional[int] = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
        assert tok("sam" ).input_ids == [1_3_8_4]
        __UpperCamelCase : Dict = "I am a small frog."
        __UpperCamelCase : Any = tok([src_text] , padding=_UpperCAmelCase , truncation=_UpperCAmelCase )["input_ids"]
        __UpperCamelCase : Optional[Any] = tok.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def a_ (self ) -> List[Any]:
        # The trailing period encodes to the same id alone or in context.
        __UpperCamelCase : Dict = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
        __UpperCamelCase : Tuple = "I am a small frog ."
        __UpperCamelCase : List[str] = "."
        __UpperCamelCase : Any = tok(_UpperCAmelCase )["input_ids"]
        __UpperCamelCase : Optional[Any] = tok(_UpperCAmelCase )["input_ids"]
        assert encoded[-1] == encoded_dot[0]
298
1
"""A* path search on a 2-D grid (restored from a broken obfuscated copy)."""
from __future__ import annotations

# Candidate single-step moves, expressed as [row, col] deltas.
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]
_lowerCAmelCase = DIRECTIONS  # keep the old (obfuscated) module-level alias


def __lowerCAmelCase(grid, init, goal, cost, heuristic):
    """Run an A* search over ``grid`` and return ``(path, action)``.

    Fixes over the previous revision: the parameter list repeated the name
    ``snake_case__`` five times (a SyntaxError) and the body read names
    (``grid``, ``init``, ``goal``, ``cost``, ``heuristic``, ``DIRECTIONS``)
    that were never bound.

    Parameters:
        grid: 2-D list where 0 marks a free cell and 1 an obstacle.
        init: ``[row, col]`` start cell.
        goal: ``[row, col]`` target cell.
        cost: uniform cost of a single move.
        heuristic: 2-D list of per-cell cost-to-goal estimates.

    Returns:
        path: list of ``[row, col]`` cells from ``init`` to ``goal``.
        action: 2-D grid; ``action[r][c]`` is the index into ``DIRECTIONS``
            of the move that first reached cell ``(r, c)``.

    Raises:
        ValueError: when the goal is unreachable.
    """
    # Cells already expanded (the "closed set") and the move that reached each cell.
    closed = [[0 for col in range(len(grid[0]))] for row in range(len(grid))]
    closed[init[0]][init[1]] = 1
    action = [[0 for col in range(len(grid[0]))] for row in range(len(grid))]

    x, y = init[0], init[1]
    g = 0
    f = g + heuristic[x][y]  # estimated total cost through the start cell
    cell = [[f, g, x, y]]  # open list of [f, g, row, col]

    found = False   # set when the goal has been expanded
    resign = False  # set when no expansion is possible
    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution" )
        # Expand the cheapest open cell (sort ascending, pop from the tail
        # after reversing keeps pop() O(1)).
        cell.sort()
        cell.reverse()
        next_cell = cell.pop()
        x, y = next_cell[2], next_cell[3]
        g = next_cell[1]
        if x == goal[0] and y == goal[1]:
            found = True
        else:
            for i in range(len(DIRECTIONS)):  # try every valid action
                xa = x + DIRECTIONS[i][0]
                ya = y + DIRECTIONS[i][1]
                if 0 <= xa < len(grid) and 0 <= ya < len(grid[0]):
                    if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                        ga = g + cost
                        fa = ga + heuristic[xa][ya]
                        cell.append([fa, ga, xa, ya])
                        closed[xa][ya] = 1
                        action[xa][ya] = i

    # Walk the action grid backwards from the goal, then reverse.
    invpath = []
    x, y = goal[0], goal[1]
    invpath.append([x, y])
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x, y = xa, ya
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


# Backward-compatible public alias: the demo below historically called `search`.
search = __lowerCAmelCase


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]  # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                heuristic[i][j] = 99  # added extra penalty in the heuristic map

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
298
"""TensorFlow RegNet model (machine-mangled copy).

NOTE(review): an obfuscation tool collapsed every class name to `A`, every
method name to `a_`, parameters to `_UpperCAmelCase`, locals to
`__UpperCamelCase`, and mangled some API names (`ConvaD` for `Conv2D`,
`ZeroPaddingaD` for `ZeroPadding2D`, `GlobalAveragePoolingaD`, `tf.floataa`,
`ACTaFN`, `Dict` used without import) while statement bodies still read the
original local names (`hidden_state`, `config`, ...).  As written the module
cannot run; only formatting, docstrings and comments were changed here.
"""
from typing import Optional, Tuple, Union

import tensorflow as tf

from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
    TFBaseModelOutputWithNoAttention,
    TFBaseModelOutputWithPoolingAndNoAttention,
    TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig

# NOTE(review): every module-level constant below rebinds the same mangled
# name `_lowerCAmelCase`; the docstring decorators later read the original
# names (_CHECKPOINT_FOR_DOC, _CONFIG_FOR_DOC, ...), which are now unbound.
_lowerCAmelCase = logging.get_logger(__name__)

# General docstring
_lowerCAmelCase = '''RegNetConfig'''

# Base docstring
_lowerCAmelCase = '''facebook/regnet-y-040'''
_lowerCAmelCase = [1, 1088, 7, 7]

# Image classification docstring
_lowerCAmelCase = '''facebook/regnet-y-040'''
_lowerCAmelCase = '''tabby, tabby cat'''

_lowerCAmelCase = [
    '''facebook/regnet-y-040''',
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class A ( tf.keras.layers.Layer ):
    """Pad -> conv -> batch-norm -> activation block (originally `TFRegNetConvLayer`)."""

    def __init__(self , _UpperCAmelCase , _UpperCAmelCase = 3 , _UpperCAmelCase = 1 , _UpperCAmelCase = 1 , _UpperCAmelCase = "relu" , **_UpperCAmelCase , ) -> Optional[int]:
        super().__init__(**_UpperCAmelCase )
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        __UpperCamelCase : List[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
        __UpperCamelCase : Tuple = tf.keras.layers.ConvaD(
            filters=_UpperCAmelCase ,
            kernel_size=_UpperCAmelCase ,
            strides=_UpperCAmelCase ,
            padding="VALID" ,
            groups=_UpperCAmelCase ,
            use_bias=_UpperCAmelCase ,
            name="convolution" ,
        )
        __UpperCamelCase : int = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
        __UpperCamelCase : List[str] = ACTaFN[activation] if activation is not None else tf.identity

    def a_ (self , _UpperCAmelCase ) -> Dict:
        # call(): convolution -> normalization -> activation.
        __UpperCamelCase : str = self.convolution(self.padding(_UpperCAmelCase ) )
        __UpperCamelCase : Dict = self.normalization(_UpperCAmelCase )
        __UpperCamelCase : Dict = self.activation(_UpperCAmelCase )
        return hidden_state


class A ( tf.keras.layers.Layer ):
    """Patch/stem embedding: one strided conv layer (originally `TFRegNetEmbeddings`)."""

    def __init__(self , _UpperCAmelCase , **_UpperCAmelCase ) -> Optional[Any]:
        super().__init__(**_UpperCAmelCase )
        __UpperCamelCase : Any = config.num_channels
        __UpperCamelCase : str = TFRegNetConvLayer(
            out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )

    def a_ (self , _UpperCAmelCase ) -> Tuple:
        __UpperCamelCase : Dict = shape_list(_UpperCAmelCase )[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        __UpperCamelCase : Any = tf.transpose(_UpperCAmelCase , perm=(0, 2, 3, 1) )
        __UpperCamelCase : List[Any] = self.embedder(_UpperCAmelCase )
        return hidden_state


class A ( tf.keras.layers.Layer ):
    """1x1 strided conv + batch-norm used as a residual shortcut (originally `TFRegNetShortCut`)."""

    def __init__(self , _UpperCAmelCase , _UpperCAmelCase = 2 , **_UpperCAmelCase ) -> Any:
        super().__init__(**_UpperCAmelCase )
        __UpperCamelCase : Any = tf.keras.layers.ConvaD(
            filters=_UpperCAmelCase , kernel_size=1 , strides=_UpperCAmelCase , use_bias=_UpperCAmelCase , name="convolution" )
        __UpperCamelCase : Tuple = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase = False ) -> tf.Tensor:
        return self.normalization(self.convolution(_UpperCAmelCase ) , training=_UpperCAmelCase )


class A ( tf.keras.layers.Layer ):
    """Squeeze-and-excitation attention block (originally `TFRegNetSELayer`)."""

    def __init__(self , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) -> Any:
        super().__init__(**_UpperCAmelCase )
        __UpperCamelCase : List[str] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_UpperCAmelCase , name="pooler" )
        __UpperCamelCase : Optional[Any] = [
            tf.keras.layers.ConvaD(filters=_UpperCAmelCase , kernel_size=1 , activation="relu" , name="attention.0" ),
            tf.keras.layers.ConvaD(filters=_UpperCAmelCase , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
        ]

    def a_ (self , _UpperCAmelCase ) -> Tuple:
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        __UpperCamelCase : List[str] = self.pooler(_UpperCAmelCase )
        for layer_module in self.attention:
            __UpperCamelCase : str = layer_module(_UpperCAmelCase )
        # Rescale the input feature map by the learned channel attention.
        __UpperCamelCase : List[Any] = hidden_state * pooled
        return hidden_state


class A ( tf.keras.layers.Layer ):
    """RegNet X residual block: 1x1 -> grouped 3x3 -> 1x1 convs plus shortcut (originally `TFRegNetXLayer`)."""

    def __init__(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1 , **_UpperCAmelCase ) -> int:
        super().__init__(**_UpperCAmelCase )
        # A projection shortcut is needed whenever the shape changes.
        __UpperCamelCase : List[Any] = in_channels != out_channels or stride != 1
        __UpperCamelCase : List[str] = max(1 , out_channels // config.groups_width )
        __UpperCamelCase : List[Any] = (
            TFRegNetShortCut(_UpperCAmelCase , stride=_UpperCAmelCase , name="shortcut" )
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear" , name="shortcut" )
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        __UpperCamelCase : Optional[Any] = [
            TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
            TFRegNetConvLayer(
                _UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , activation=config.hidden_act , name="layer.1" ),
            TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase , name="layer.2" ),
        ]
        __UpperCamelCase : Dict = ACTaFN[config.hidden_act]

    def a_ (self , _UpperCAmelCase ) -> Union[str, Any]:
        __UpperCamelCase : List[Any] = hidden_state
        for layer_module in self.layers:
            __UpperCamelCase : Dict = layer_module(_UpperCAmelCase )
        __UpperCamelCase : List[Any] = self.shortcut(_UpperCAmelCase )
        hidden_state += residual
        __UpperCamelCase : Tuple = self.activation(_UpperCAmelCase )
        return hidden_state


class A ( tf.keras.layers.Layer ):
    """RegNet Y residual block: X block plus a squeeze-and-excitation layer (originally `TFRegNetYLayer`)."""

    def __init__(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1 , **_UpperCAmelCase ) -> Any:
        super().__init__(**_UpperCAmelCase )
        __UpperCamelCase : str = in_channels != out_channels or stride != 1
        __UpperCamelCase : Optional[int] = max(1 , out_channels // config.groups_width )
        __UpperCamelCase : Union[str, Any] = (
            TFRegNetShortCut(_UpperCAmelCase , stride=_UpperCAmelCase , name="shortcut" )
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear" , name="shortcut" )
        )
        __UpperCamelCase : Union[str, Any] = [
            TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
            TFRegNetConvLayer(
                _UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , activation=config.hidden_act , name="layer.1" ),
            TFRegNetSELayer(_UpperCAmelCase , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
            TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase , name="layer.3" ),
        ]
        __UpperCamelCase : Union[str, Any] = ACTaFN[config.hidden_act]

    def a_ (self , _UpperCAmelCase ) -> int:
        __UpperCamelCase : str = hidden_state
        for layer_module in self.layers:
            __UpperCamelCase : Any = layer_module(_UpperCAmelCase )
        __UpperCamelCase : Optional[Any] = self.shortcut(_UpperCAmelCase )
        hidden_state += residual
        __UpperCamelCase : Union[str, Any] = self.activation(_UpperCAmelCase )
        return hidden_state


class A ( tf.keras.layers.Layer ):
    """One stage: `depth` residual blocks, X or Y flavour per config (originally `TFRegNetStage`)."""

    def __init__(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , **_UpperCAmelCase ) -> int:
        super().__init__(**_UpperCAmelCase )
        __UpperCamelCase : List[str] = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        __UpperCamelCase : Tuple = [
            # downsampling is done in the first layer with stride of 2
            layer(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , name="layers.0" ),
            *[layer(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , name=f"layers.{i+1}" ) for i in range(depth - 1 )],
        ]

    def a_ (self , _UpperCAmelCase ) -> Any:
        for layer_module in self.layers:
            __UpperCamelCase : Dict = layer_module(_UpperCAmelCase )
        return hidden_state


class A ( tf.keras.layers.Layer ):
    """Stack of stages producing the backbone features (originally `TFRegNetEncoder`)."""

    def __init__(self , _UpperCAmelCase , **_UpperCAmelCase ) -> str:
        super().__init__(**_UpperCAmelCase )
        __UpperCamelCase : Dict = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                _UpperCAmelCase ,
                config.embedding_size ,
                config.hidden_sizes[0] ,
                stride=2 if config.downsample_in_first_stage else 1 ,
                depth=config.depths[0] ,
                name="stages.0" ,
            ) )
        # Consecutive (in, out) channel pairs for the remaining stages.
        __UpperCamelCase : Union[str, Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for i, ((in_channels, out_channels), depth) in enumerate(zip(_UpperCAmelCase , config.depths[1:] ) ):
            self.stages.append(TFRegNetStage(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , depth=_UpperCAmelCase , name=f"stages.{i+1}" ) )

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase = False , _UpperCAmelCase = True ) -> TFBaseModelOutputWithNoAttention:
        __UpperCamelCase : List[Any] = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                __UpperCamelCase : Any = hidden_states + (hidden_state,)
            __UpperCamelCase : Any = stage_module(_UpperCAmelCase )
        if output_hidden_states:
            __UpperCamelCase : List[Any] = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return TFBaseModelOutputWithNoAttention(last_hidden_state=_UpperCAmelCase , hidden_states=_UpperCAmelCase )


@keras_serializable
class A ( tf.keras.layers.Layer ):
    """Embedder + encoder + global pooler (originally `TFRegNetMainLayer`)."""

    A = RegNetConfig  # `config_class` in the un-mangled original

    def __init__(self , _UpperCAmelCase , **_UpperCAmelCase ) -> List[Any]:
        super().__init__(**_UpperCAmelCase )
        __UpperCamelCase : Optional[int] = config
        __UpperCamelCase : List[Any] = TFRegNetEmbeddings(_UpperCAmelCase , name="embedder" )
        __UpperCamelCase : Union[str, Any] = TFRegNetEncoder(_UpperCAmelCase , name="encoder" )
        __UpperCamelCase : Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_UpperCAmelCase , name="pooler" )

    @unpack_inputs
    def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        __UpperCamelCase : Optional[int] = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        __UpperCamelCase : Dict = return_dict if return_dict is not None else self.config.use_return_dict
        __UpperCamelCase : Union[str, Any] = self.embedder(_UpperCAmelCase , training=_UpperCAmelCase )
        __UpperCamelCase : str = self.encoder(
            _UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase , training=_UpperCAmelCase )
        __UpperCamelCase : List[str] = encoder_outputs[0]
        __UpperCamelCase : Tuple = self.pooler(_UpperCAmelCase )
        # Change to NCHW output format have uniformity in the modules
        __UpperCamelCase : List[str] = tf.transpose(_UpperCAmelCase , perm=(0, 3, 1, 2) )
        __UpperCamelCase : List[Any] = tf.transpose(_UpperCAmelCase , perm=(0, 3, 1, 2) )
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            __UpperCamelCase : List[str] = tuple([tf.transpose(_UpperCAmelCase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=_UpperCAmelCase ,
            pooler_output=_UpperCAmelCase ,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states ,
        )


class A ( SCREAMING_SNAKE_CASE__ ):
    """Weight-init / serving-signature base class (originally `TFRegNetPreTrainedModel`; base presumably `TFPreTrainedModel` -- confirm)."""

    A = RegNetConfig
    A = "regnet"        # rebinds `A`; `base_model_prefix` in the original
    A = "pixel_values"  # rebinds `A`; `main_input_name` in the original

    @property
    def a_ (self ) -> List[Any]:
        # Serving input signature (note the mangled `tf.floataa`, i.e. float32).
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )}


_lowerCAmelCase = R''' Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. '''
_lowerCAmelCase = R''' Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConveNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. '''


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top." , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ ):
    """Headless RegNet wrapper around the main layer (originally `TFRegNetModel`)."""

    def __init__(self , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) -> Tuple:
        super().__init__(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase )
        __UpperCamelCase : Optional[Any] = TFRegNetMainLayer(_UpperCAmelCase , name="regnet" )

    @unpack_inputs
    @add_start_docstrings_to_model_forward(_UpperCAmelCase )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC ,
        output_type=_UpperCAmelCase ,
        config_class=_CONFIG_FOR_DOC ,
        modality="vision" ,
        expected_output=_EXPECTED_OUTPUT_SHAPE ,
    )
    def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        __UpperCamelCase : List[str] = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        __UpperCamelCase : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
        __UpperCamelCase : Tuple = self.regnet(
            pixel_values=_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase , training=_UpperCAmelCase , )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state ,
            pooler_output=outputs.pooler_output ,
            hidden_states=outputs.hidden_states ,
        )


@add_start_docstrings(
    "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
    """RegNet with a linear classification head (originally `TFRegNetForImageClassification`)."""

    def __init__(self , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) -> int:
        super().__init__(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase )
        __UpperCamelCase : Optional[Any] = config.num_labels
        __UpperCamelCase : Any = TFRegNetMainLayer(_UpperCAmelCase , name="regnet" )
        # classification head
        __UpperCamelCase : List[str] = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(_UpperCAmelCase )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT ,
        output_type=_UpperCAmelCase ,
        config_class=_CONFIG_FOR_DOC ,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,
    )
    def a_ (self , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        __UpperCamelCase : Dict = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        __UpperCamelCase : str = return_dict if return_dict is not None else self.config.use_return_dict
        __UpperCamelCase : Dict = self.regnet(
            _UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase , training=_UpperCAmelCase )
        __UpperCamelCase : Union[str, Any] = outputs.pooler_output if return_dict else outputs[1]
        __UpperCamelCase : List[str] = self.classifier[0](_UpperCAmelCase )
        __UpperCamelCase : Optional[int] = self.classifier[1](_UpperCAmelCase )
        __UpperCamelCase : str = None if labels is None else self.hf_compute_loss(labels=_UpperCAmelCase , logits=_UpperCAmelCase )
        if not return_dict:
            __UpperCamelCase : Union[str, Any] = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=_UpperCAmelCase , logits=_UpperCAmelCase , hidden_states=outputs.hidden_states )
298
1
"""Top-down stable merge sort (restored from a broken obfuscated copy)."""


def __lowerCAmelCase(collection):
    """Return the elements of *collection* in ascending order (stable).

    Fixes over the previous revision: the inner ``merge`` helper declared the
    same parameter name twice (a SyntaxError) and the body read unbound names
    (``collection``, ``left``, ``right``, ``merge_sort``).  The old
    ``pop(0)``-based merge was also quadratic; this index-based merge keeps
    the overall O(n log n) bound.

    >>> __lowerCAmelCase([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> __lowerCAmelCase([])
    []
    >>> __lowerCAmelCase([-2, -5, -45])
    [-45, -5, -2]
    """

    def merge(left, right):
        # Stable two-way merge: ties favour the left run.
        merged = []
        i = j = 0
        while i < len(left) and j < len(right):
            if left[i] <= right[j]:
                merged.append(left[i])
                i += 1
            else:
                merged.append(right[j])
                j += 1
        merged.extend(left[i:])
        merged.extend(right[j:])
        return merged

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(__lowerCAmelCase(collection[:mid]), __lowerCAmelCase(collection[mid:]))


# Backward-compatible public alias: the demo below historically called `merge_sort`.
merge_sort = __lowerCAmelCase

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
298
"""DeeBERT: BERT with early-exit "highway" classifiers (machine-mangled copy).

NOTE(review): identifiers were obfuscated (classes to `A`, methods to `a_`,
locals to `__UpperCamelCase`) while statement bodies still read the original
names; the module cannot run as written.  This chunk is also truncated: the
final `@add_start_docstrings(` call at the bottom ends mid-string and is
reproduced verbatim.  Only formatting and comments were changed here.
"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
    BERT_INPUTS_DOCSTRING,
    BERT_START_DOCSTRING,
    BertEmbeddings,
    BertLayer,
    BertPooler,
    BertPreTrainedModel,
)


def __lowerCAmelCase ( snake_case__ ):
    # Row-wise entropy of the softmax of logits, computed as
    # log(sum exp(x)) - (sum x*exp(x)) / (sum exp(x)); the mangled body reads
    # `x`, `exp_x`, `A`, `B`, which are never bound as written.
    __UpperCamelCase : Tuple = torch.exp(snake_case__ )
    __UpperCamelCase : str = torch.sum(snake_case__ , dim=1 )  # sum of exp(x_i)
    __UpperCamelCase : int = torch.sum(x * exp_x , dim=1 )  # sum of x_i * exp(x_i)
    return torch.log(snake_case__ ) - B / A


class A ( nn.Module ):
    """BERT encoder with one highway exit per layer (originally `DeeBertEncoder`)."""

    def __init__(self , _UpperCAmelCase ) -> Union[str, Any]:
        super().__init__()
        __UpperCamelCase : Any = config.output_attentions
        __UpperCamelCase : Dict = config.output_hidden_states
        __UpperCamelCase : Union[str, Any] = nn.ModuleList([BertLayer(_UpperCAmelCase ) for _ in range(config.num_hidden_layers )] )
        __UpperCamelCase : Tuple = nn.ModuleList([BertHighway(_UpperCAmelCase ) for _ in range(config.num_hidden_layers )] )
        # -1 disables early exit at every layer until thresholds are set.
        __UpperCamelCase : Optional[int] = [-1 for _ in range(config.num_hidden_layers )]

    def a_ (self , _UpperCAmelCase ) -> int:
        # Set per-layer early-exit entropy thresholds: a scalar is broadcast
        # to every layer, otherwise the given sequence is stored as-is.
        if (type(_UpperCAmelCase ) is float) or (type(_UpperCAmelCase ) is int):
            for i in range(len(self.early_exit_entropy ) ):
                __UpperCamelCase : str = x
        else:
            __UpperCamelCase : List[Any] = x

    def a_ (self , _UpperCAmelCase ) -> str:
        # Copy the main pooler's weights into every highway pooler.
        __UpperCamelCase : Tuple = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name] )

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ) -> List[Any]:
        # forward(): run the layers; at inference, a highway whose prediction
        # entropy drops below its threshold aborts via HighwayException.
        __UpperCamelCase : Optional[Any] = ()
        __UpperCamelCase : Tuple = ()
        __UpperCamelCase : Dict = ()
        for i, layer_module in enumerate(self.layer ):
            if self.output_hidden_states:
                __UpperCamelCase : Tuple = all_hidden_states + (hidden_states,)
            __UpperCamelCase : Optional[int] = layer_module(
                _UpperCAmelCase , _UpperCAmelCase , head_mask[i] , _UpperCAmelCase , _UpperCAmelCase )
            __UpperCamelCase : Tuple = layer_outputs[0]
            if self.output_attentions:
                __UpperCamelCase : Optional[Any] = all_attentions + (layer_outputs[1],)
            __UpperCamelCase : Any = (hidden_states,)
            if self.output_hidden_states:
                __UpperCamelCase : Any = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                __UpperCamelCase : int = current_outputs + (all_attentions,)
            __UpperCamelCase : Optional[int] = self.highway[i](_UpperCAmelCase )  # logits, pooled_output
            if not self.training:
                __UpperCamelCase : Dict = highway_exit[0]
                __UpperCamelCase : Any = entropy(_UpperCAmelCase )
                __UpperCamelCase : str = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                __UpperCamelCase : Optional[Any] = all_highway_exits + (highway_exit,)
                if highway_entropy < self.early_exit_entropy[i]:
                    # Confident enough: stop here and surface the exit result.
                    __UpperCamelCase : str = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(_UpperCAmelCase , i + 1 )
            else:
                __UpperCamelCase : Optional[int] = all_highway_exits + (highway_exit,)
        # Add last layer
        if self.output_hidden_states:
            __UpperCamelCase : int = all_hidden_states + (hidden_states,)
        __UpperCamelCase : Dict = (hidden_states,)
        if self.output_hidden_states:
            __UpperCamelCase : Union[str, Any] = outputs + (all_hidden_states,)
        if self.output_attentions:
            __UpperCamelCase : Optional[int] = outputs + (all_attentions,)
        __UpperCamelCase : List[Any] = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits


@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). " , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ ):
    """Full DeeBERT model: embeddings + DeeBertEncoder + pooler (originally `DeeBertModel`)."""

    def __init__(self , _UpperCAmelCase ) -> Dict:
        super().__init__(_UpperCAmelCase )
        __UpperCamelCase : Union[str, Any] = config
        __UpperCamelCase : Dict = BertEmbeddings(_UpperCAmelCase )
        __UpperCamelCase : Optional[Any] = DeeBertEncoder(_UpperCAmelCase )
        __UpperCamelCase : str = BertPooler(_UpperCAmelCase )
        self.init_weights()

    def a_ (self ) -> Any:
        self.encoder.init_highway_pooler(self.pooler )

    def a_ (self ) -> Optional[int]:
        # get_input_embeddings in the un-mangled original.
        return self.embeddings.word_embeddings

    def a_ (self , _UpperCAmelCase ) -> Dict:
        # set_input_embeddings in the un-mangled original.
        __UpperCamelCase : int = value

    def a_ (self , _UpperCAmelCase ) -> Tuple:
        # Prune attention heads; mapping is {layer_index: [head indices]}.
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(_UpperCAmelCase )

    @add_start_docstrings_to_model_forward(_UpperCAmelCase )
    def a_ (self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ) -> Union[str, Any]:
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
        elif input_ids is not None:
            __UpperCamelCase : Tuple = input_ids.size()
        elif inputs_embeds is not None:
            __UpperCamelCase : Optional[int] = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds" )
        __UpperCamelCase : List[str] = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            __UpperCamelCase : int = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
        if encoder_attention_mask is None:
            __UpperCamelCase : Tuple = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
        if token_type_ids is None:
            __UpperCamelCase : Optional[Any] = torch.zeros(_UpperCAmelCase , dtype=torch.long , device=_UpperCAmelCase )
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        __UpperCamelCase : torch.Tensor = self.get_extended_attention_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            __UpperCamelCase : Tuple = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            __UpperCamelCase : Any = encoder_attention_mask[:, None, None, :]
        __UpperCamelCase : List[Any] = encoder_extended_attention_mask.to(
            dtype=next(self.parameters() ).dtype )  # fp16 compatibility
        __UpperCamelCase : Dict = (1.0 - encoder_extended_attention_mask) * -10_000.0
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        __UpperCamelCase : Dict = self.get_head_mask(_UpperCAmelCase , self.config.num_hidden_layers )
        __UpperCamelCase : Optional[int] = self.embeddings(
            input_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase )
        __UpperCamelCase : List[Any] = self.encoder(
            _UpperCAmelCase ,
            attention_mask=_UpperCAmelCase ,
            head_mask=_UpperCAmelCase ,
            encoder_hidden_states=_UpperCAmelCase ,
            encoder_attention_mask=_UpperCAmelCase ,
        )
        __UpperCamelCase : Union[str, Any] = encoder_outputs[0]
        __UpperCamelCase : Any = self.pooler(_UpperCAmelCase )
        __UpperCamelCase : Union[str, Any] = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits


class A ( SCREAMING_SNAKE_CASE__ ):
    """Control-flow exception carrying an early-exit result (originally `HighwayException`; base presumably `Exception` -- confirm)."""

    def __init__(self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]:
        __UpperCamelCase : Tuple = message
        __UpperCamelCase : Union[str, Any] = exit_layer  # start from 1!


class A ( nn.Module ):
    """Per-layer highway head: pooler -> dropout -> linear classifier (originally `BertHighway`)."""

    def __init__(self , _UpperCAmelCase ) -> Dict:
        super().__init__()
        __UpperCamelCase : Union[str, Any] = BertPooler(_UpperCAmelCase )
        __UpperCamelCase : int = nn.Dropout(config.hidden_dropout_prob )
        __UpperCamelCase : Union[str, Any] = nn.Linear(config.hidden_size , config.num_labels )

    def a_ (self , _UpperCAmelCase ) -> Any:
        # Pooler
        __UpperCamelCase : Optional[int] = encoder_outputs[0]
        __UpperCamelCase : str = self.pooler(_UpperCAmelCase )
        # "return" pooler_output
        # BertModel
        __UpperCamelCase : Tuple = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output
        # Dropout and classification
        __UpperCamelCase : Dict = bmodel_output[1]
        __UpperCamelCase : List[Any] = self.dropout(_UpperCAmelCase )
        __UpperCamelCase : Any = self.classifier(_UpperCAmelCase )
        return logits, pooled_output


# NOTE(review): the chunk is cut off here; the decorator string below is
# truncated mid-literal in the original and reproduced verbatim.
@add_start_docstrings(
    "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. 
" , SCREAMING_SNAKE_CASE__ , ) class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__(self , _UpperCAmelCase ) -> Any: super().__init__(_UpperCAmelCase ) __UpperCamelCase : List[Any] = config.num_labels __UpperCamelCase : List[Any] = config.num_hidden_layers __UpperCamelCase : Optional[int] = DeeBertModel(_UpperCAmelCase ) __UpperCamelCase : List[str] = nn.Dropout(config.hidden_dropout_prob ) __UpperCamelCase : str = nn.Linear(config.hidden_size , self.config.num_labels ) self.init_weights() @add_start_docstrings_to_model_forward(_UpperCAmelCase ) def a_ (self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=-1 , _UpperCAmelCase=False , ) -> int: __UpperCamelCase : int = self.num_layers try: __UpperCamelCase : Tuple = self.bert( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , head_mask=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase , ) # sequence_output, pooled_output, (hidden_states), (attentions), highway exits __UpperCamelCase : str = outputs[1] __UpperCamelCase : List[Any] = self.dropout(_UpperCAmelCase ) __UpperCamelCase : Dict = self.classifier(_UpperCAmelCase ) __UpperCamelCase : Tuple = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: __UpperCamelCase : int = e.message __UpperCamelCase : Optional[Any] = e.exit_layer __UpperCamelCase : Optional[int] = outputs[0] if not self.training: __UpperCamelCase : Optional[int] = entropy(_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = [] __UpperCamelCase : Any = [] if labels is not None: if self.num_labels == 1: # We are doing regression __UpperCamelCase : List[str] = MSELoss() __UpperCamelCase : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: __UpperCamelCase : Dict = CrossEntropyLoss() __UpperCamelCase : Any = loss_fct(logits.view(-1 
, self.num_labels ) , labels.view(-1 ) ) # work with highway exits __UpperCamelCase : List[Any] = [] for highway_exit in outputs[-1]: __UpperCamelCase : Union[str, Any] = highway_exit[0] if not self.training: highway_logits_all.append(_UpperCAmelCase ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression __UpperCamelCase : Union[str, Any] = MSELoss() __UpperCamelCase : str = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: __UpperCamelCase : Optional[Any] = CrossEntropyLoss() __UpperCamelCase : List[str] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(_UpperCAmelCase ) if train_highway: __UpperCamelCase : int = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: __UpperCamelCase : Dict = (loss,) + outputs if not self.training: __UpperCamelCase : Optional[int] = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: __UpperCamelCase : int = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
298
1
"""Validate Indian phone numbers with a single regular expression."""
import re

# Optional "+91" country prefix (optionally followed by "-" or a space),
# optional leading "0" or "91", then a 10-digit subscriber number whose
# first digit is 7, 8 or 9.
_PHONE_PATTERN = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")


def indian_phone_validator(phone: str) -> bool:
    """Return True if ``phone`` is a syntactically valid Indian phone number.

    >>> indian_phone_validator("+918827897895")
    True
    >>> indian_phone_validator("123")
    False
    """
    # Original code defined the function under an obfuscated name and compared
    # against an undefined variable ``phone``; fixed to use the parameter.
    match = _PHONE_PATTERN.search(phone)
    # The pattern is anchored with ^...$, so search() acts as a full match;
    # match.string is the input, so the equality mirrors the original intent.
    return match is not None and match.string == phone


if __name__ == "__main__":
    print(indian_phone_validator("+918827897895"))
298
"""Shared filename and cache-location constants (diffusers-style constants module).

The original obfuscated file bound *every* constant to the same name
``_lowerCAmelCase`` — each assignment clobbered the previous one — and read
``default_cache_path`` before it was ever defined (NameError at import time).
Restored to the canonical diffusers constant names so each value is reachable.
"""
import os

from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home

# Default on-disk cache shared with huggingface_hub.
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
# Overridable location for dynamically downloaded modules.
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
# Legacy ``revision`` values that are deprecated in favour of ``variant``.
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
# Suffix of the text-encoder self-attention submodule — TODO confirm consumer.
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
298
1
"""torch.hub entry points forwarding to the transformers Auto* classes.

NOTE(review): this looks like transformers' ``hubconf.py`` — each entry point
forwards ``*args``/``**kwargs`` to the matching ``Auto*.from_pretrained`` and
inherits its docstring via ``add_start_docstrings``. The original obfuscated
file defined all seven wrappers under the single name ``__lowerCAmelCase``
(each redefinition clobbering the previous) and appended an undefined
``SRC_DIR`` to ``sys.path``; both are fixed here with the canonical names.
"""
import os
import sys

# Make the in-repo ``src`` layout importable when this file is loaded via torch.hub.
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)

from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)

# Packages torch.hub must have installed before loading these entry points.
dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    """Forward to ``AutoConfig.from_pretrained`` (docstring inherited)."""
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    """Forward to ``AutoTokenizer.from_pretrained`` (docstring inherited)."""
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    """Forward to ``AutoModel.from_pretrained`` (docstring inherited)."""
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    """Forward to ``AutoModelForCausalLM.from_pretrained`` (docstring inherited)."""
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    """Forward to ``AutoModelForMaskedLM.from_pretrained`` (docstring inherited)."""
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    """Forward to ``AutoModelForSequenceClassification.from_pretrained``."""
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    """Forward to ``AutoModelForQuestionAnswering.from_pretrained``."""
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
298
# NOTE(review): machine-garbled chunk — original newlines lost, identifiers
# obfuscated ("class A", a_, _UpperCAmelCase). It appears to be the complete
# TensorFlow ConvBERT test module (TFConvBertModelTester, the common-test
# TFConvBertModelTest, and an integration test checking YituTech/conv-bert-base
# logits) — TODO confirm against upstream transformers tests. Kept byte-identical:
# the physical line breaks fall inside dict/tuple literals and statements, so no
# safe reformatting or comment insertion is possible mid-chunk.
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class A : '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase=1_3 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=9_9 , _UpperCAmelCase=3_2 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=3_7 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=5_1_2 , _UpperCAmelCase=1_6 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ) -> Dict: __UpperCamelCase : Optional[Any] = parent __UpperCamelCase : List[str] = 1_3 __UpperCamelCase : List[Any] = 7 __UpperCamelCase : List[str] = True __UpperCamelCase : Optional[Any] = True __UpperCamelCase : Tuple = True __UpperCamelCase : str = True __UpperCamelCase : List[Any] = 9_9 __UpperCamelCase : Union[str, Any] = 3_8_4 __UpperCamelCase : str = 2 __UpperCamelCase : Optional[Any] = 4 __UpperCamelCase : Any = 3_7 __UpperCamelCase : str = "gelu" __UpperCamelCase : Optional[Any] = 0.1 __UpperCamelCase : str = 0.1 __UpperCamelCase : str = 5_1_2 __UpperCamelCase : Optional[Any] = 1_6 __UpperCamelCase : Dict = 2 __UpperCamelCase : Optional[int] = 0.02 __UpperCamelCase : List[Any] = 3 __UpperCamelCase : Optional[Any] = 4 __UpperCamelCase : int = 1_2_8 __UpperCamelCase : Tuple = 2 
__UpperCamelCase : str = 9 __UpperCamelCase : List[Any] = 1 __UpperCamelCase : Any = None def a_ (self ) -> int: __UpperCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase : str = None if self.use_input_mask: __UpperCamelCase : str = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCamelCase : int = None if self.use_token_type_ids: __UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCamelCase : List[Any] = None __UpperCamelCase : Union[str, Any] = None __UpperCamelCase : Optional[Any] = None if self.use_labels: __UpperCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCamelCase : Tuple = ids_tensor([self.batch_size] , self.num_choices ) __UpperCamelCase : str = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Dict: __UpperCamelCase : Tuple = TFConvBertModel(config=_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} __UpperCamelCase : Optional[Any] = [input_ids, input_mask] __UpperCamelCase : str = model(_UpperCAmelCase ) 
__UpperCamelCase : int = model(_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]: __UpperCamelCase : int = TFConvBertForMaskedLM(config=_UpperCAmelCase ) __UpperCamelCase : Dict = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } __UpperCamelCase : List[str] = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]: __UpperCamelCase : Union[str, Any] = self.num_labels __UpperCamelCase : Optional[Any] = TFConvBertForSequenceClassification(config=_UpperCAmelCase ) __UpperCamelCase : List[str] = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } __UpperCamelCase : Optional[Any] = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]: __UpperCamelCase : Optional[int] = self.num_choices __UpperCamelCase : List[Any] = TFConvBertForMultipleChoice(config=_UpperCAmelCase ) __UpperCamelCase : Optional[int] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCamelCase : Optional[Any] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCamelCase : str = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCamelCase : List[str] = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": 
multiple_choice_token_type_ids, } __UpperCamelCase : int = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any: __UpperCamelCase : List[str] = self.num_labels __UpperCamelCase : Tuple = TFConvBertForTokenClassification(config=_UpperCAmelCase ) __UpperCamelCase : Dict = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } __UpperCamelCase : Union[str, Any] = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]: __UpperCamelCase : int = TFConvBertForQuestionAnswering(config=_UpperCAmelCase ) __UpperCamelCase : Dict = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } __UpperCamelCase : Any = model(_UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def a_ (self ) -> str: __UpperCamelCase : str = self.prepare_config_and_inputs() ( ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ) : Any = config_and_inputs __UpperCamelCase : int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): '''simple docstring''' A = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, 
TFConvBertForMultipleChoice, ) if is_tf_available() else () ) A = ( { "feature-extraction": TFConvBertModel, "fill-mask": TFConvBertForMaskedLM, "question-answering": TFConvBertForQuestionAnswering, "text-classification": TFConvBertForSequenceClassification, "token-classification": TFConvBertForTokenClassification, "zero-shot": TFConvBertForSequenceClassification, } if is_tf_available() else {} ) A = False A = False A = False def a_ (self ) -> Optional[int]: __UpperCamelCase : Tuple = TFConvBertModelTester(self ) __UpperCamelCase : Optional[Any] = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=3_7 ) def a_ (self ) -> Dict: self.config_tester.run_common_tests() def a_ (self ) -> Dict: __UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def a_ (self ) -> Tuple: __UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase ) def a_ (self ) -> Tuple: __UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase ) def a_ (self ) -> Dict: __UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase ) def a_ (self ) -> Dict: __UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase ) def a_ (self ) -> Optional[int]: __UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase ) @slow def a_ (self ) -> Any: __UpperCamelCase , __UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() __UpperCamelCase : str = True __UpperCamelCase : int = True if hasattr(_UpperCAmelCase , "use_cache" ): __UpperCamelCase : 
List[Any] = True __UpperCamelCase : List[str] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length ) __UpperCamelCase : Optional[Any] = getattr(self.model_tester , "key_length" , _UpperCAmelCase ) for model_class in self.all_model_classes: __UpperCamelCase : Any = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) __UpperCamelCase : int = model_class(_UpperCAmelCase ) __UpperCamelCase : Any = len(model(_UpperCAmelCase ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase ) __UpperCamelCase : List[str] = os.path.join(_UpperCAmelCase , "saved_model" , "1" ) __UpperCamelCase : List[str] = tf.keras.models.load_model(_UpperCAmelCase ) __UpperCamelCase : Dict = model(_UpperCAmelCase ) if self.is_encoder_decoder: __UpperCamelCase : Any = outputs["encoder_hidden_states"] __UpperCamelCase : Tuple = outputs["encoder_attentions"] else: __UpperCamelCase : Tuple = outputs["hidden_states"] __UpperCamelCase : Optional[int] = outputs["attentions"] self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase ) __UpperCamelCase : Any = getattr( self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def a_ (self ) -> Optional[Any]: __UpperCamelCase : Tuple = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) self.assertIsNotNone(_UpperCAmelCase ) def a_ (self ) -> Tuple: __UpperCamelCase , __UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __UpperCamelCase : str = True __UpperCamelCase 
: Tuple = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length ) __UpperCamelCase : Optional[int] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length ) __UpperCamelCase : Any = getattr(self.model_tester , "key_length" , _UpperCAmelCase ) __UpperCamelCase : List[Any] = getattr(self.model_tester , "key_length" , _UpperCAmelCase ) def check_decoder_attentions_output(_UpperCAmelCase ): __UpperCamelCase : Dict = len(_UpperCAmelCase ) self.assertEqual(out_len % 2 , 0 ) __UpperCamelCase : List[str] = outputs.decoder_attentions self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(_UpperCAmelCase ): __UpperCamelCase : Any = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: __UpperCamelCase : Any = True __UpperCamelCase : Dict = False __UpperCamelCase : str = model_class(_UpperCAmelCase ) __UpperCamelCase : Tuple = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) __UpperCamelCase : List[Any] = len(_UpperCAmelCase ) self.assertEqual(config.output_hidden_states , _UpperCAmelCase ) check_encoder_attentions_output(_UpperCAmelCase ) if self.is_encoder_decoder: __UpperCamelCase : str = model_class(_UpperCAmelCase ) __UpperCamelCase : Dict = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertEqual(config.output_hidden_states , _UpperCAmelCase ) check_decoder_attentions_output(_UpperCAmelCase ) # Check that output attentions can also be changed via the 
config del inputs_dict["output_attentions"] __UpperCamelCase : Optional[Any] = True __UpperCamelCase : Tuple = model_class(_UpperCAmelCase ) __UpperCamelCase : int = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertEqual(config.output_hidden_states , _UpperCAmelCase ) check_encoder_attentions_output(_UpperCAmelCase ) # Check attention is always last and order is fine __UpperCamelCase : int = True __UpperCamelCase : str = True __UpperCamelCase : Optional[Any] = model_class(_UpperCAmelCase ) __UpperCamelCase : Optional[int] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase ) ) self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase ) check_encoder_attentions_output(_UpperCAmelCase ) @require_tf class A ( unittest.TestCase ): '''simple docstring''' @slow def a_ (self ) -> str: __UpperCamelCase : Dict = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) __UpperCamelCase : str = tf.constant([[0, 1, 2, 3, 4, 5]] ) __UpperCamelCase : Optional[int] = model(_UpperCAmelCase )[0] __UpperCamelCase : Tuple = [1, 6, 7_6_8] self.assertEqual(output.shape , _UpperCAmelCase ) __UpperCamelCase : Any = tf.constant( [ [ [-0.03_475_493, -0.4_686_034, -0.30_638_832], [0.22_637_248, -0.26_988_646, -0.7_423_424], [0.10_324_868, -0.45_013_508, -0.58_280_784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1E-4 )
298
1
# NOTE(review): machine-garbled chunk — newlines lost, names obfuscated. It appears
# to be the complete FlaxAutoModel test module (loading bert/roberta checkpoints,
# jax.jit smoke tests, and error-message regression tests) — TODO confirm against
# upstream transformers tests. Kept byte-identical: the physical line break falls
# inside a tokenizer(...) call, so no safe reformatting is possible.
'''simple docstring''' import unittest from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow if is_flax_available(): import jax from transformers.models.auto.modeling_flax_auto import FlaxAutoModel from transformers.models.bert.modeling_flax_bert import FlaxBertModel from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel @require_flax class A ( unittest.TestCase ): '''simple docstring''' @slow def a_ (self ) -> List[str]: for model_name in ["bert-base-cased", "bert-large-uncased"]: with self.subTest(_UpperCAmelCase ): __UpperCamelCase : List[str] = AutoConfig.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) __UpperCamelCase : Dict = FlaxAutoModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) @slow def a_ (self ) -> Any: for model_name in ["roberta-base", "roberta-large"]: with self.subTest(_UpperCAmelCase ): __UpperCamelCase : Union[str, Any] = AutoConfig.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) __UpperCamelCase : str = FlaxAutoModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) @slow def a_ (self ) -> Any: for model_name in ["bert-base-cased", "bert-large-uncased"]: __UpperCamelCase : Tuple = AutoTokenizer.from_pretrained(_UpperCAmelCase ) __UpperCamelCase : Dict = FlaxBertModel.from_pretrained(_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = tokenizer("Do you support jax jitted function?" 
, return_tensors=TensorType.JAX ) @jax.jit def eval(**_UpperCAmelCase ): return model(**_UpperCAmelCase ) eval(**_UpperCAmelCase ).block_until_ready() @slow def a_ (self ) -> str: for model_name in ["roberta-base", "roberta-large"]: __UpperCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(_UpperCAmelCase ) __UpperCamelCase : Optional[int] = FlaxRobertaModel.from_pretrained(_UpperCAmelCase ) __UpperCamelCase : Optional[int] = tokenizer("Do you support jax jitted function?" , return_tensors=TensorType.JAX ) @jax.jit def eval(**_UpperCAmelCase ): return model(**_UpperCAmelCase ) eval(**_UpperCAmelCase ).block_until_ready() def a_ (self ) -> str: with self.assertRaisesRegex( _UpperCAmelCase , "bert-base is not a local folder and is not a valid model identifier" ): __UpperCamelCase : List[str] = FlaxAutoModel.from_pretrained("bert-base" ) def a_ (self ) -> Dict: with self.assertRaisesRegex( _UpperCAmelCase , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): __UpperCamelCase : Tuple = FlaxAutoModel.from_pretrained(_UpperCAmelCase , revision="aaaaaa" ) def a_ (self ) -> List[Any]: with self.assertRaisesRegex( _UpperCAmelCase , "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack" , ): __UpperCamelCase : str = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model" ) def a_ (self ) -> Union[str, Any]: with self.assertRaisesRegex(_UpperCAmelCase , "Use `from_pt=True` to load this model" ): __UpperCamelCase : Tuple = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
298
# NOTE(review): machine-garbled chunk — newlines lost, identifiers obfuscated.
# It appears to be the complete timm-ResNet → transformers conversion script
# (a forward-hook Tracker, a ModuleTransfer that copies state dicts between
# matched submodules, and per-checkpoint convert/push drivers) — TODO confirm
# against the upstream transformers conversion script. Kept byte-identical:
# the physical line breaks fall inside call expressions, so no safe
# reformatting or mid-chunk comment insertion is possible.
'''simple docstring''' import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() _lowerCAmelCase = logging.get_logger() @dataclass class A : '''simple docstring''' A = 42 A = field(default_factory=SCREAMING_SNAKE_CASE__ ) A = field(default_factory=SCREAMING_SNAKE_CASE__ ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]: __UpperCamelCase : str = len(list(m.modules() ) ) == 1 or isinstance(_UpperCAmelCase , nn.Convad ) or isinstance(_UpperCAmelCase , nn.BatchNormad ) if has_not_submodules: self.traced.append(_UpperCAmelCase ) def __call__(self , _UpperCAmelCase ) -> Optional[int]: for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(_UpperCAmelCase ) [x.remove() for x in self.handles] return self @property def a_ (self ) -> Tuple: # check the len of the state_dict keys to see if we have learnable params return list(filter(lambda _UpperCAmelCase : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class A : '''simple docstring''' A = 42 A = 42 A = 0 A = field(default_factory=SCREAMING_SNAKE_CASE__ ) A = field(default_factory=SCREAMING_SNAKE_CASE__ ) def __call__(self , _UpperCAmelCase ) -> Any: __UpperCamelCase : List[str] = Tracker(self.dest )(_UpperCAmelCase ).parametrized __UpperCamelCase : List[Any] = Tracker(self.src )(_UpperCAmelCase ).parametrized __UpperCamelCase : Optional[int] = list(filter(lambda _UpperCAmelCase : type(_UpperCAmelCase ) not in self.src_skip , _UpperCAmelCase ) ) __UpperCamelCase : List[Any] = list(filter(lambda _UpperCAmelCase : type(_UpperCAmelCase ) not in self.dest_skip , 
_UpperCAmelCase ) ) if len(_UpperCAmelCase ) != len(_UpperCAmelCase ): raise Exception( f"Numbers of operations are different. Source module has {len(_UpperCAmelCase )} operations while" f" destination module has {len(_UpperCAmelCase )}." ) for dest_m, src_m in zip(_UpperCAmelCase , _UpperCAmelCase ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(f"Transfered from={src_m} to={dest_m}" ) def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = True ): print(F"Converting {name}..." ) with torch.no_grad(): __UpperCamelCase : int = timm.create_model(snake_case__ , pretrained=snake_case__ ).eval() __UpperCamelCase : Union[str, Any] = ResNetForImageClassification(snake_case__ ).eval() __UpperCamelCase : Tuple = ModuleTransfer(src=snake_case__ , dest=snake_case__ ) __UpperCamelCase : List[Any] = torch.randn((1, 3, 224, 224) ) module_transfer(snake_case__ ) assert torch.allclose(from_model(snake_case__ ) , our_model(snake_case__ ).logits ), "The model logits don't match the original one." 
__UpperCamelCase : Any = F"resnet{'-'.join(name.split('resnet' ) )}" print(snake_case__ ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message="Add model" , use_temp_dir=snake_case__ , ) # we can use the convnext one __UpperCamelCase : Union[str, Any] = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message="Add image processor" , use_temp_dir=snake_case__ , ) print(F"Pushed {checkpoint_name}" ) def __lowerCAmelCase ( snake_case__ , snake_case__ = None , snake_case__ = True ): __UpperCamelCase : str = "imagenet-1k-id2label.json" __UpperCamelCase : Any = 1_000 __UpperCamelCase : List[str] = (1, num_labels) __UpperCamelCase : List[str] = "huggingface/label-files" __UpperCamelCase : str = num_labels __UpperCamelCase : str = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="dataset" ) , "r" ) ) __UpperCamelCase : List[str] = {int(snake_case__ ): v for k, v in idalabel.items()} __UpperCamelCase : Any = idalabel __UpperCamelCase : Optional[int] = {v: k for k, v in idalabel.items()} __UpperCamelCase : Tuple = partial(snake_case__ , num_labels=snake_case__ , idalabel=snake_case__ , labelaid=snake_case__ ) __UpperCamelCase : Dict = { "resnet18": ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ), "resnet26": ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ), "resnet34": ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ), "resnet50": ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ), "resnet101": ImageNetPreTrainedConfig( depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ), "resnet152": ImageNetPreTrainedConfig( 
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ), } if model_name: convert_weight_and_push(snake_case__ , names_to_config[model_name] , snake_case__ , snake_case__ ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) return config, expected_shape if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default=None, type=str, help=( '''The name of the model you wish to convert, it must be one of the supported resnet* architecture,''' ''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.''' ), ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=Path, required=True, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', default=True, type=bool, required=False, help='''If True, push model and image processor to the hub.''', ) _lowerCAmelCase = parser.parse_args() _lowerCAmelCase = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
298
1
'''simple docstring'''


def __lowerCAmelCase(weights, values, number_of_items, max_weight, index):
    """Solve the 0/1 knapsack problem by naive recursion.

    Args:
        weights: per-item weights (indexable, same length as ``values``).
        values: per-item values.
        number_of_items: total number of items (``len(weights)``).
        max_weight: remaining weight capacity of the knapsack.
        index: index of the item currently being considered.

    Returns:
        The maximum total value achievable from items ``index..number_of_items-1``
        without exceeding ``max_weight``.
    """
    # Base case: no items left to consider.
    if index == number_of_items:
        return 0
    # Option 1: skip the current item entirely.
    # NOTE: the original recursed into an undefined name `knapsack`; the
    # function must call itself.
    ans1 = __lowerCAmelCase(weights, values, number_of_items, max_weight, index + 1)
    # Option 2: take the current item, if it still fits.
    ans2 = 0
    if weights[index] <= max_weight:
        ans2 = values[index] + __lowerCAmelCase(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    # Best of taking vs. skipping.
    return max(ans1, ans2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
298
'''simple docstring''' import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) _lowerCAmelCase = logging.getLogger() def __lowerCAmelCase ( ): __UpperCamelCase : List[str] = argparse.ArgumentParser() parser.add_argument("-f" ) __UpperCamelCase : Any = parser.parse_args() return args.f def __lowerCAmelCase ( snake_case__ ): __UpperCamelCase : Dict = {} __UpperCamelCase : Dict = os.path.join(snake_case__ , "all_results.json" ) if os.path.exists(snake_case__ ): with open(snake_case__ , "r" ) as f: __UpperCamelCase : Any = json.load(snake_case__ ) else: raise ValueError(F"can't find {path}" ) return results def __lowerCAmelCase ( ): __UpperCamelCase : Any = torch.cuda.is_available() and torch_device == "cuda" return is_using_cuda and is_apex_available() _lowerCAmelCase = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' @classmethod def a_ (cls ) -> Union[str, Any]: # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU __UpperCamelCase : Optional[Any] = tempfile.mkdtemp() __UpperCamelCase : List[str] = os.path.join(cls.tmpdir , "default_config.yml" ) write_basic_config(save_location=cls.configPath ) __UpperCamelCase : Optional[Any] = ["accelerate", "launch", "--config_file", cls.configPath] @classmethod def a_ (cls ) -> Union[str, Any]: shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Optional[int]: __UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir() __UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path 
distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n ".split() if is_cuda_and_apex_available(): testargs.append("--fp16" ) run_command(self._launch_args + testargs ) __UpperCamelCase : Tuple = get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "glue_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Dict: __UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir() __UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n ".split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. 
return run_command(self._launch_args + testargs ) __UpperCamelCase : int = get_results(_UpperCAmelCase ) self.assertLess(result["perplexity"] , 1_0_0 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "clm_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Any: __UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir() __UpperCamelCase : Optional[Any] = f"\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n ".split() run_command(self._launch_args + testargs ) __UpperCamelCase : Optional[Any] = get_results(_UpperCAmelCase ) self.assertLess(result["perplexity"] , 4_2 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "mlm_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> int: # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu __UpperCamelCase : int = 7 if get_gpu_count() > 1 else 2 __UpperCamelCase : int = self.get_auto_remove_tmp_dir() __UpperCamelCase : str = f"\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n ".split() run_command(self._launch_args + testargs ) __UpperCamelCase : List[Any] = 
get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) self.assertLess(result["train_loss"] , 0.5 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "ner_no_trainer" ) ) ) @unittest.skip(reason="Fix me @muellerzr" ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Any: __UpperCamelCase : Tuple = self.get_auto_remove_tmp_dir() __UpperCamelCase : str = f"\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split() run_command(self._launch_args + testargs ) __UpperCamelCase : Optional[int] = get_results(_UpperCAmelCase ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. 
self.assertGreaterEqual(result["eval_f1"] , 2_8 ) self.assertGreaterEqual(result["eval_exact"] , 2_8 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "qa_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Dict: __UpperCamelCase : Tuple = self.get_auto_remove_tmp_dir() __UpperCamelCase : List[str] = f"\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n ".split() run_command(self._launch_args + testargs ) __UpperCamelCase : Tuple = get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.8 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "swag_no_trainer" ) ) ) @slow @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Union[str, Any]: __UpperCamelCase : str = self.get_auto_remove_tmp_dir() __UpperCamelCase : Dict = f"\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split() run_command(self._launch_args + testargs ) __UpperCamelCase : Dict = get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_rouge1"] , 1_0 ) self.assertGreaterEqual(result["eval_rouge2"] , 2 ) self.assertGreaterEqual(result["eval_rougeL"] 
, 7 ) self.assertGreaterEqual(result["eval_rougeLsum"] , 7 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "summarization_no_trainer" ) ) ) @slow @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Tuple: __UpperCamelCase : Optional[int] = self.get_auto_remove_tmp_dir() __UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n ".split() run_command(self._launch_args + testargs ) __UpperCamelCase : List[Any] = get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_bleu"] , 3_0 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "translation_no_trainer" ) ) ) @slow def a_ (self ) -> List[Any]: __UpperCamelCase : Tuple = logging.StreamHandler(sys.stdout ) logger.addHandler(_UpperCAmelCase ) __UpperCamelCase : Dict = self.get_auto_remove_tmp_dir() __UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n ".split() run_command(self._launch_args + testargs ) __UpperCamelCase : Optional[int] = 
get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.10 ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Tuple: __UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir() __UpperCamelCase : Optional[Any] = f"\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n ".split() if is_cuda_and_apex_available(): testargs.append("--fp16" ) run_command(self._launch_args + testargs ) __UpperCamelCase : str = get_results(_UpperCAmelCase ) # The base model scores a 25% self.assertGreaterEqual(result["eval_accuracy"] , 0.6 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "step_1" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "image_classification_no_trainer" ) ) )
298
1
'''simple docstring''' import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' A = ["image_processor", "tokenizer"] A = "OwlViTImageProcessor" A = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__(self , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase ) -> str: __UpperCamelCase : Tuple = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , _UpperCAmelCase , ) __UpperCamelCase : str = kwargs.pop("feature_extractor" ) __UpperCamelCase : Tuple = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(_UpperCAmelCase , _UpperCAmelCase ) def __call__(self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="max_length" , _UpperCAmelCase="np" , **_UpperCAmelCase ) -> str: if text is None and query_images is None and images is None: raise ValueError( "You have to specify at least one text or query image or image. All three cannot be none." 
) if text is not None: if isinstance(_UpperCAmelCase , _UpperCAmelCase ) or (isinstance(_UpperCAmelCase , _UpperCAmelCase ) and not isinstance(text[0] , _UpperCAmelCase )): __UpperCamelCase : Tuple = [self.tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )] elif isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(text[0] , _UpperCAmelCase ): __UpperCamelCase : List[str] = [] # Maximum number of queries across batch __UpperCamelCase : List[str] = max([len(_UpperCAmelCase ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(_UpperCAmelCase ) != max_num_queries: __UpperCamelCase : Any = t + [" "] * (max_num_queries - len(_UpperCAmelCase )) __UpperCamelCase : int = self.tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ) encodings.append(_UpperCAmelCase ) else: raise TypeError("Input text should be a string, a list of strings or a nested list of strings" ) if return_tensors == "np": __UpperCamelCase : List[str] = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) __UpperCamelCase : int = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp __UpperCamelCase : Tuple = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) __UpperCamelCase : Optional[Any] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch __UpperCamelCase : Any = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 ) __UpperCamelCase : List[Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf __UpperCamelCase : Any = tf.stack([encoding["input_ids"] for encoding in encodings] , 
axis=0 ) __UpperCamelCase : Optional[Any] = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 ) else: raise ValueError("Target return tensor type could not be returned" ) __UpperCamelCase : Optional[Any] = BatchEncoding() __UpperCamelCase : Union[str, Any] = input_ids __UpperCamelCase : List[str] = attention_mask if query_images is not None: __UpperCamelCase : str = BatchEncoding() __UpperCamelCase : Any = self.image_processor( _UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ).pixel_values __UpperCamelCase : List[Any] = query_pixel_values if images is not None: __UpperCamelCase : Dict = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ) if text is not None and images is not None: __UpperCamelCase : Optional[Any] = image_features.pixel_values return encoding elif query_images is not None and images is not None: __UpperCamelCase : Union[str, Any] = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**_UpperCAmelCase ) , tensor_type=_UpperCAmelCase ) def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]: return self.image_processor.post_process(*_UpperCAmelCase , **_UpperCAmelCase ) def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> List[str]: return self.image_processor.post_process_object_detection(*_UpperCAmelCase , **_UpperCAmelCase ) def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]: return self.image_processor.post_process_image_guided_detection(*_UpperCAmelCase , **_UpperCAmelCase ) def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Union[str, Any]: return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase ) def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> int: return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase ) @property def a_ (self ) -> Tuple: warnings.warn( "`feature_extractor_class` is 
deprecated and will be removed in v5. Use `image_processor_class` instead." , _UpperCAmelCase , ) return self.image_processor_class @property def a_ (self ) -> Union[str, Any]: warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _UpperCAmelCase , ) return self.image_processor
298
'''simple docstring'''
from maths.prime_check import is_prime


def __lowerCAmelCase(number):
    """Return the twin prime of *number* if one exists.

    Two primes are twin primes when they differ by exactly 2.

    Args:
        number: candidate lower member of a twin-prime pair.

    Returns:
        ``number + 2`` if both ``number`` and ``number + 2`` are prime,
        otherwise ``-1``.

    Raises:
        TypeError: if *number* is not an integer.
    """
    # Reject non-integers explicitly instead of failing inside is_prime.
    # NOTE: the original checked isinstance(number, number), which is a bug;
    # the second argument must be the type `int`.
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
298
1
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class A ( unittest.TestCase ): '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=3 , _UpperCAmelCase=3_0 , _UpperCAmelCase=4_0_0 , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=True , _UpperCAmelCase=1 / 2_5_5 , _UpperCAmelCase=True , ) -> int: # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p __UpperCamelCase : Any = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} __UpperCamelCase : Any = parent __UpperCamelCase : Optional[Any] = batch_size __UpperCamelCase : List[Any] = num_channels __UpperCamelCase : Union[str, Any] = min_resolution __UpperCamelCase : Dict = max_resolution __UpperCamelCase : List[str] = do_resize __UpperCamelCase : Optional[int] = size __UpperCamelCase : List[str] = do_normalize __UpperCamelCase : str = image_mean __UpperCamelCase : Dict = image_std __UpperCamelCase : Dict = do_rescale __UpperCamelCase : int = rescale_factor __UpperCamelCase : int = do_pad def a_ (self ) -> int: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def a_ (self , _UpperCAmelCase , _UpperCAmelCase=False ) -> List[Any]: if not batched: __UpperCamelCase : str = image_inputs[0] if 
isinstance(_UpperCAmelCase , Image.Image ): __UpperCamelCase , __UpperCamelCase : Dict = image.size else: __UpperCamelCase , __UpperCamelCase : Union[str, Any] = image.shape[1], image.shape[2] if w < h: __UpperCamelCase : Tuple = int(self.size["shortest_edge"] * h / w ) __UpperCamelCase : Optional[Any] = self.size["shortest_edge"] elif w > h: __UpperCamelCase : Optional[Any] = self.size["shortest_edge"] __UpperCamelCase : str = int(self.size["shortest_edge"] * w / h ) else: __UpperCamelCase : List[str] = self.size["shortest_edge"] __UpperCamelCase : Tuple = self.size["shortest_edge"] else: __UpperCamelCase : Optional[Any] = [] for image in image_inputs: __UpperCamelCase , __UpperCamelCase : List[str] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __UpperCamelCase : Tuple = max(_UpperCAmelCase , key=lambda _UpperCAmelCase : item[0] )[0] __UpperCamelCase : str = max(_UpperCAmelCase , key=lambda _UpperCAmelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ): '''simple docstring''' A = DeformableDetrImageProcessor if is_vision_available() else None def a_ (self ) -> List[str]: __UpperCamelCase : Tuple = DeformableDetrImageProcessingTester(self ) @property def a_ (self ) -> List[Any]: return self.image_processor_tester.prepare_image_processor_dict() def a_ (self ) -> str: __UpperCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_UpperCAmelCase , "image_mean" ) ) self.assertTrue(hasattr(_UpperCAmelCase , "image_std" ) ) self.assertTrue(hasattr(_UpperCAmelCase , "do_normalize" ) ) self.assertTrue(hasattr(_UpperCAmelCase , "do_resize" ) ) self.assertTrue(hasattr(_UpperCAmelCase , "do_rescale" ) ) self.assertTrue(hasattr(_UpperCAmelCase , "do_pad" ) ) self.assertTrue(hasattr(_UpperCAmelCase , "size" ) ) def a_ (self ) -> Optional[Any]: __UpperCamelCase : str = 
self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} ) self.assertEqual(image_processor.do_pad , _UpperCAmelCase ) __UpperCamelCase : int = self.image_processing_class.from_dict( self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=_UpperCAmelCase ) self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} ) self.assertEqual(image_processor.do_pad , _UpperCAmelCase ) def a_ (self ) -> Optional[Any]: pass def a_ (self ) -> List[str]: # Initialize image_processing __UpperCamelCase : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __UpperCamelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , Image.Image ) # Test not batched input __UpperCamelCase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values __UpperCamelCase , __UpperCamelCase : Optional[Any] = self.image_processor_tester.get_expected_values(_UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __UpperCamelCase , __UpperCamelCase : str = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase ) __UpperCamelCase : Optional[int] = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def a_ (self ) -> Optional[Any]: # Initialize image_processing __UpperCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __UpperCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester , 
equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , np.ndarray ) # Test not batched input __UpperCamelCase : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values __UpperCamelCase , __UpperCamelCase : Union[str, Any] = self.image_processor_tester.get_expected_values(_UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __UpperCamelCase : List[str] = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values __UpperCamelCase , __UpperCamelCase : List[Any] = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def a_ (self ) -> List[str]: # Initialize image_processing __UpperCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __UpperCamelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , torch.Tensor ) # Test not batched input __UpperCamelCase : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values __UpperCamelCase , __UpperCamelCase : Optional[Any] = self.image_processor_tester.get_expected_values(_UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __UpperCamelCase : List[str] = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values __UpperCamelCase , __UpperCamelCase : Tuple = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase ) self.assertEqual( encoded_images.shape 
, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def a_ (self ) -> List[Any]: # prepare image and target __UpperCamelCase : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f: __UpperCamelCase : List[Any] = json.loads(f.read() ) __UpperCamelCase : Dict = {"image_id": 3_9_7_6_9, "annotations": target} # encode them __UpperCamelCase : List[str] = DeformableDetrImageProcessor() __UpperCamelCase : Union[str, Any] = image_processing(images=_UpperCAmelCase , annotations=_UpperCAmelCase , return_tensors="pt" ) # verify pixel values __UpperCamelCase : Optional[int] = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding["pixel_values"].shape , _UpperCAmelCase ) __UpperCamelCase : Dict = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , _UpperCAmelCase , atol=1E-4 ) ) # verify area __UpperCamelCase : Any = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , _UpperCAmelCase ) ) # verify boxes __UpperCamelCase : Union[str, Any] = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , _UpperCAmelCase ) __UpperCamelCase : Any = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , _UpperCAmelCase , atol=1E-3 ) ) # verify image_id __UpperCamelCase : Optional[Any] = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , _UpperCAmelCase ) ) # verify is_crowd __UpperCamelCase : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , _UpperCAmelCase ) ) # verify class_labels __UpperCamelCase : List[str] = torch.tensor([7_5, 
7_5, 6_3, 6_5, 1_7, 1_7] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , _UpperCAmelCase ) ) # verify orig_size __UpperCamelCase : int = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , _UpperCAmelCase ) ) # verify size __UpperCamelCase : List[Any] = torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , _UpperCAmelCase ) ) @slow def a_ (self ) -> str: # prepare image, target and masks_path __UpperCamelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f: __UpperCamelCase : Dict = json.loads(f.read() ) __UpperCamelCase : Union[str, Any] = {"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target} __UpperCamelCase : str = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" ) # encode them __UpperCamelCase : List[Any] = DeformableDetrImageProcessor(format="coco_panoptic" ) __UpperCamelCase : List[Any] = image_processing(images=_UpperCAmelCase , annotations=_UpperCAmelCase , masks_path=_UpperCAmelCase , return_tensors="pt" ) # verify pixel values __UpperCamelCase : Dict = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding["pixel_values"].shape , _UpperCAmelCase ) __UpperCamelCase : Dict = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , _UpperCAmelCase , atol=1E-4 ) ) # verify area __UpperCamelCase : List[Any] = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , _UpperCAmelCase ) ) # verify boxes __UpperCamelCase : List[Any] = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , _UpperCAmelCase ) __UpperCamelCase : List[str] = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] ) 
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , _UpperCAmelCase , atol=1E-3 ) ) # verify image_id __UpperCamelCase : Optional[Any] = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , _UpperCAmelCase ) ) # verify is_crowd __UpperCamelCase : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , _UpperCAmelCase ) ) # verify class_labels __UpperCamelCase : Dict = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , _UpperCAmelCase ) ) # verify masks __UpperCamelCase : Dict = 8_2_2_8_7_3 self.assertEqual(encoding["labels"][0]["masks"].sum().item() , _UpperCAmelCase ) # verify orig_size __UpperCamelCase : List[Any] = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , _UpperCAmelCase ) ) # verify size __UpperCamelCase : str = torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , _UpperCAmelCase ) )
298
"""Compute the Hubble parameter H(z) from the Friedmann equation.

Input: Hubble constant and the present-day relative densities of radiation,
matter and dark energy, plus a redshift. The curvature density is inferred
so the relative densities sum to one.
"""


def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Return the Hubble parameter H(z) in the same units as *hubble_constant*.

    Raises:
        ValueError: if redshift or any density is negative, or if a relative
            density exceeds one.
    """
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    # Curvature density closes the sum of the relative densities to one.
    curvature = 1 - (matter_density + radiation_density + dark_energy)
    # E(z)^2 from the Friedmann equation.
    e_a = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    return hubble_constant * e_a ** (1 / 2)


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation: at z = 0 this reproduces the Hubble constant
    matter_density = 0.3
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
298
1
'''simple docstring''' import numpy as np from transformers import BatchFeature from transformers.testing_utils import require_tf, require_torch from .test_feature_extraction_common import FeatureExtractionSavingTestMixin class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' A = None A = None @property def a_ (self ) -> Dict: return self.feat_extract_tester.prepare_feat_extract_dict() def a_ (self ) -> Tuple: __UpperCamelCase : Any = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(_UpperCAmelCase , "feature_size" ) ) self.assertTrue(hasattr(_UpperCAmelCase , "sampling_rate" ) ) self.assertTrue(hasattr(_UpperCAmelCase , "padding_value" ) ) def a_ (self ) -> Dict: __UpperCamelCase : Optional[int] = self.feat_extract_tester.prepare_inputs_for_common() __UpperCamelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict ) __UpperCamelCase : Dict = feat_extract.model_input_names[0] __UpperCamelCase : List[Any] = BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(_UpperCAmelCase ) == len(_UpperCAmelCase ) for x, y in zip(_UpperCAmelCase , processed_features[input_name] ) ) ) __UpperCamelCase : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_UpperCAmelCase ) __UpperCamelCase : Dict = BatchFeature({input_name: speech_inputs} , tensor_type="np" ) __UpperCamelCase : Optional[Any] = processed_features[input_name] if len(batch_features_input.shape ) < 3: __UpperCamelCase : Any = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_torch def a_ (self ) -> int: __UpperCamelCase : Tuple = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_UpperCAmelCase ) __UpperCamelCase : str = self.feature_extraction_class(**self.feat_extract_dict ) __UpperCamelCase : List[str] = feat_extract.model_input_names[0] __UpperCamelCase : 
Optional[Any] = BatchFeature({input_name: speech_inputs} , tensor_type="pt" ) __UpperCamelCase : Union[str, Any] = processed_features[input_name] if len(batch_features_input.shape ) < 3: __UpperCamelCase : List[str] = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_tf def a_ (self ) -> Optional[int]: __UpperCamelCase : str = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_UpperCAmelCase ) __UpperCamelCase : int = self.feature_extraction_class(**self.feat_extract_dict ) __UpperCamelCase : Optional[int] = feat_extract.model_input_names[0] __UpperCamelCase : Optional[Any] = BatchFeature({input_name: speech_inputs} , tensor_type="tf" ) __UpperCamelCase : Union[str, Any] = processed_features[input_name] if len(batch_features_input.shape ) < 3: __UpperCamelCase : List[Any] = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) def a_ (self , _UpperCAmelCase=False ) -> Union[str, Any]: def _inputs_have_equal_length(_UpperCAmelCase ): __UpperCamelCase : Dict = len(input[0] ) for input_slice in input[1:]: if len(_UpperCAmelCase ) != length: return False return True def _inputs_are_equal(_UpperCAmelCase , _UpperCAmelCase ): if len(_UpperCAmelCase ) != len(_UpperCAmelCase ): return False for input_slice_a, input_slice_a in zip(_UpperCAmelCase , _UpperCAmelCase ): if not np.allclose(np.asarray(_UpperCAmelCase ) , np.asarray(_UpperCAmelCase ) , atol=1E-3 ): return False return True __UpperCamelCase : Dict = self.feature_extraction_class(**self.feat_extract_dict ) __UpperCamelCase : List[Any] = self.feat_extract_tester.prepare_inputs_for_common(numpify=_UpperCAmelCase ) __UpperCamelCase : Union[str, Any] = feat_extract.model_input_names[0] __UpperCamelCase : Optional[int] = 
BatchFeature({input_name: speech_inputs} ) __UpperCamelCase : Any = self.feat_extract_tester.seq_length_diff __UpperCamelCase : Any = self.feat_extract_tester.max_seq_length + pad_diff __UpperCamelCase : str = self.feat_extract_tester.min_seq_length __UpperCamelCase : Optional[int] = self.feat_extract_tester.batch_size __UpperCamelCase : int = self.feat_extract_tester.feature_size # test padding for List[int] + numpy __UpperCamelCase : List[str] = feat_extract.pad(_UpperCAmelCase , padding=_UpperCAmelCase ) __UpperCamelCase : Union[str, Any] = input_a[input_name] __UpperCamelCase : Optional[Any] = feat_extract.pad(_UpperCAmelCase , padding="longest" ) __UpperCamelCase : List[str] = input_a[input_name] __UpperCamelCase : int = feat_extract.pad(_UpperCAmelCase , padding="max_length" , max_length=len(speech_inputs[-1] ) ) __UpperCamelCase : Any = input_a[input_name] __UpperCamelCase : Optional[int] = feat_extract.pad(_UpperCAmelCase , padding="longest" , return_tensors="np" ) __UpperCamelCase : Union[str, Any] = input_a[input_name] # max_length parameter has to be provided when setting `padding="max_length"` with self.assertRaises(_UpperCAmelCase ): feat_extract.pad(_UpperCAmelCase , padding="max_length" )[input_name] __UpperCamelCase : Tuple = feat_extract.pad( _UpperCAmelCase , padding="max_length" , max_length=_UpperCAmelCase , return_tensors="np" ) __UpperCamelCase : Any = input_a[input_name] self.assertFalse(_inputs_have_equal_length(_UpperCAmelCase ) ) self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase ) ) self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase ) ) self.assertTrue(_inputs_are_equal(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertTrue(len(input_a[0] ) == pad_min_length ) self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff ) self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) ) self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == 
input_a.shape[2] == feature_size ) # test padding for `pad_to_multiple_of` for List[int] + numpy __UpperCamelCase : Optional[Any] = feat_extract.pad(_UpperCAmelCase , pad_to_multiple_of=1_0 ) __UpperCamelCase : Tuple = input_a[input_name] __UpperCamelCase : str = feat_extract.pad(_UpperCAmelCase , padding="longest" , pad_to_multiple_of=1_0 ) __UpperCamelCase : Tuple = input_a[input_name] __UpperCamelCase : Optional[Any] = feat_extract.pad( _UpperCAmelCase , padding="max_length" , pad_to_multiple_of=1_0 , max_length=_UpperCAmelCase ) __UpperCamelCase : Tuple = input_a[input_name] __UpperCamelCase : int = feat_extract.pad( _UpperCAmelCase , padding="max_length" , pad_to_multiple_of=1_0 , max_length=_UpperCAmelCase , return_tensors="np" , ) __UpperCamelCase : List[str] = input_a[input_name] self.assertTrue(all(len(_UpperCAmelCase ) % 1_0 == 0 for x in input_a ) ) self.assertTrue(_inputs_are_equal(_UpperCAmelCase , _UpperCAmelCase ) ) __UpperCamelCase : Optional[int] = pad_max_length if pad_max_length % 1_0 == 0 else (pad_max_length // 1_0 + 1) * 1_0 self.assertTrue(all(len(_UpperCAmelCase ) == expected_mult_pad_length for x in input_a ) ) self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == feature_size ) # Check padding value is correct __UpperCamelCase : Optional[Any] = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum() self.assertTrue( abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 ) self.assertTrue( abs( np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) ) < 1E-3 ) self.assertTrue( abs( np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) ) < 1E-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * 
(pad_max_length - pad_min_length) ) < 1E-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) ) < 1E-3 ) def a_ (self , _UpperCAmelCase=False ) -> Optional[int]: def _inputs_have_equal_length(_UpperCAmelCase ): __UpperCamelCase : str = len(input[0] ) for input_slice in input[1:]: if len(_UpperCAmelCase ) != length: return False return True def _inputs_are_equal(_UpperCAmelCase , _UpperCAmelCase ): if len(_UpperCAmelCase ) != len(_UpperCAmelCase ): return False for input_slice_a, input_slice_a in zip(_UpperCAmelCase , _UpperCAmelCase ): if not np.allclose(np.asarray(_UpperCAmelCase ) , np.asarray(_UpperCAmelCase ) , atol=1E-3 ): return False return True __UpperCamelCase : Dict = self.feature_extraction_class(**self.feat_extract_dict ) __UpperCamelCase : Tuple = self.feat_extract_tester.prepare_inputs_for_common(numpify=_UpperCAmelCase ) __UpperCamelCase : List[str] = feat_extract.model_input_names[0] __UpperCamelCase : Optional[int] = BatchFeature({input_name: speech_inputs} ) # truncate to smallest __UpperCamelCase : int = feat_extract.pad( _UpperCAmelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , truncation=_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = input_a[input_name] __UpperCamelCase : str = feat_extract.pad(_UpperCAmelCase , padding="max_length" , max_length=len(speech_inputs[0] ) ) __UpperCamelCase : List[Any] = input_a[input_name] self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase ) ) self.assertFalse(_inputs_have_equal_length(_UpperCAmelCase ) ) # truncate to smallest with np __UpperCamelCase : Tuple = feat_extract.pad( _UpperCAmelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , return_tensors="np" , truncation=_UpperCAmelCase , ) __UpperCamelCase : str = input_a[input_name] __UpperCamelCase : Union[str, Any] = feat_extract.pad( _UpperCAmelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , return_tensors="np" ) 
__UpperCamelCase : Union[str, Any] = input_a[input_name] self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase ) ) self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(_UpperCAmelCase ) ) # truncate to middle __UpperCamelCase : Tuple = feat_extract.pad( _UpperCAmelCase , padding="max_length" , max_length=len(speech_inputs[1] ) , truncation=_UpperCAmelCase , return_tensors="np" , ) __UpperCamelCase : List[Any] = input_a[input_name] __UpperCamelCase : Tuple = feat_extract.pad( _UpperCAmelCase , padding="max_length" , max_length=len(speech_inputs[1] ) , truncation=_UpperCAmelCase ) __UpperCamelCase : int = input_a[input_name] __UpperCamelCase : Any = feat_extract.pad( _UpperCAmelCase , padding="max_length" , max_length=len(speech_inputs[1] ) , return_tensors="np" ) __UpperCamelCase : Union[str, Any] = input_a[input_name] self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) ) self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase ) ) self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase ) ) self.assertTrue(_inputs_are_equal(_UpperCAmelCase , _UpperCAmelCase ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(_UpperCAmelCase ) ) self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) ) # padding has to be max_length when setting `truncation=True` with self.assertRaises(_UpperCAmelCase ): feat_extract.pad(_UpperCAmelCase , truncation=_UpperCAmelCase )[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(_UpperCAmelCase ): feat_extract.pad(_UpperCAmelCase , padding="longest" , truncation=_UpperCAmelCase )[input_name] # padding has to be max_length when setting `truncation=True` with 
self.assertRaises(_UpperCAmelCase ): feat_extract.pad(_UpperCAmelCase , padding="longest" , truncation=_UpperCAmelCase )[input_name] # max_length parameter has to be provided when setting `truncation=True` and padding="max_length" with self.assertRaises(_UpperCAmelCase ): feat_extract.pad(_UpperCAmelCase , padding="max_length" , truncation=_UpperCAmelCase )[input_name] # test truncation for `pad_to_multiple_of` for List[int] + numpy __UpperCamelCase : Union[str, Any] = 1_2 __UpperCamelCase : Optional[Any] = feat_extract.pad( _UpperCAmelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_UpperCAmelCase , truncation=_UpperCAmelCase , ) __UpperCamelCase : str = input_a[input_name] __UpperCamelCase : Tuple = feat_extract.pad( _UpperCAmelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_UpperCAmelCase , ) __UpperCamelCase : Optional[int] = input_a[input_name] # retrieve expected_length as multiple of pad_to_multiple_of __UpperCamelCase : Any = len(speech_inputs[0] ) if expected_length % pad_to_multiple_of != 0: __UpperCamelCase : List[Any] = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of self.assertTrue(len(input_a[0] ) == expected_length ) self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase ) ) self.assertFalse(_inputs_have_equal_length(_UpperCAmelCase ) ) def a_ (self ) -> Union[str, Any]: self._check_padding(numpify=_UpperCAmelCase ) def a_ (self ) -> List[Any]: self._check_padding(numpify=_UpperCAmelCase ) def a_ (self ) -> Any: self._check_truncation(numpify=_UpperCAmelCase ) def a_ (self ) -> Tuple: self._check_truncation(numpify=_UpperCAmelCase ) @require_torch def a_ (self ) -> Any: __UpperCamelCase : str = self.feature_extraction_class(**self.feat_extract_dict ) __UpperCamelCase : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common() __UpperCamelCase : List[str] = feat_extract.model_input_names[0] __UpperCamelCase : Any = 
BatchFeature({input_name: speech_inputs} ) __UpperCamelCase : Tuple = feat_extract.pad(_UpperCAmelCase , padding="longest" , return_tensors="np" )[input_name] __UpperCamelCase : List[str] = feat_extract.pad(_UpperCAmelCase , padding="longest" , return_tensors="pt" )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 ) @require_tf def a_ (self ) -> Optional[int]: __UpperCamelCase : int = self.feature_extraction_class(**self.feat_extract_dict ) __UpperCamelCase : List[str] = self.feat_extract_tester.prepare_inputs_for_common() __UpperCamelCase : List[str] = feat_extract.model_input_names[0] __UpperCamelCase : str = BatchFeature({input_name: speech_inputs} ) __UpperCamelCase : Dict = feat_extract.pad(_UpperCAmelCase , padding="longest" , return_tensors="np" )[input_name] __UpperCamelCase : str = feat_extract.pad(_UpperCAmelCase , padding="longest" , return_tensors="tf" )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1E-2 ) def a_ (self ) -> Any: __UpperCamelCase : int = self.feat_extract_dict __UpperCamelCase : Any = True __UpperCamelCase : int = self.feature_extraction_class(**_UpperCAmelCase ) __UpperCamelCase : List[str] = self.feat_extract_tester.prepare_inputs_for_common() __UpperCamelCase : Union[str, Any] = [len(_UpperCAmelCase ) for x in speech_inputs] __UpperCamelCase : List[str] = feat_extract.model_input_names[0] __UpperCamelCase : Dict = BatchFeature({input_name: speech_inputs} ) __UpperCamelCase : List[Any] = feat_extract.pad(_UpperCAmelCase , padding="longest" , return_tensors="np" ) self.assertIn("attention_mask" , _UpperCAmelCase ) self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) ) self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _UpperCAmelCase ) def a_ (self ) -> Optional[int]: __UpperCamelCase : Union[str, Any] = self.feat_extract_dict 
__UpperCamelCase : Union[str, Any] = True __UpperCamelCase : Union[str, Any] = self.feature_extraction_class(**_UpperCAmelCase ) __UpperCamelCase : List[str] = self.feat_extract_tester.prepare_inputs_for_common() __UpperCamelCase : Tuple = [len(_UpperCAmelCase ) for x in speech_inputs] __UpperCamelCase : Dict = feat_extract.model_input_names[0] __UpperCamelCase : Optional[Any] = BatchFeature({input_name: speech_inputs} ) __UpperCamelCase : Optional[Any] = min(_UpperCAmelCase ) __UpperCamelCase : Union[str, Any] = feat_extract.pad( _UpperCAmelCase , padding="max_length" , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors="np" ) self.assertIn("attention_mask" , _UpperCAmelCase ) self.assertListEqual( list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
298
"""Utility that keeps the auto-generated model lists in the task guides in sync."""
import argparse
import os

from transformers.utils import direct_transformers_import

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_DOCS = "docs/source/en/tasks"


def _find_text_in_file(filename, start_prompt, end_prompt):
    """Locate the text between *start_prompt* and *end_prompt* in *filename*.

    Returns a 4-tuple ``(text, start_index, end_index, lines)`` where
    ``lines[start_index:end_index]`` is the (blank-line-trimmed) span between
    the two prompts and ``lines`` is the full file content, so callers can
    splice a replacement back in.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    # Find the end prompt.
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    # Trim blank lines (a line holding only "\n" has length 1) on both ends.
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# This is to make sure the transformers module imported is the one in the repo.
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Maps each task guide file to the auto-mapping whose models should be listed in it.
TASK_GUIDE_TO_MODELS = {
    "asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    "audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    "language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    "image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    "masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    "multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    "object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    "question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    "semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    "sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    "summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    "translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    "document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    "monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}

# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
    "summarization.md": ("nllb",),
    "translation.md": ("nllb",),
}


def get_model_list_for_task(task_guide):
    """Return the markdown list of model links for *task_guide*, newline-terminated."""
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    """Check (and optionally rewrite) the generated model list inside *task_guide*.

    Raises:
        ValueError: if the list is stale and *overwrite* is False.
    """
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_DOCS, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_DOCS, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
298
1
"""Lazy import structure for the MLuke tokenizer (requires sentencepiece)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available

# Populated only with the pieces whose optional dependencies are available.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer
else:
    import sys

    # Replace this module with a lazy module so heavy imports happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
298
"""Processor that wraps an OwlViT image processor and a CLIP tokenizer into a
single object preparing text queries, query images and target images."""
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class A(ProcessorMixin):
    """Combines an `OwlViTImageProcessor` and a CLIP tokenizer into one processor.

    NOTE(review): the base class is restored to `ProcessorMixin` (imported above);
    the previous revision referenced an undefined name here.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # `feature_extractor` is the deprecated spelling of `image_processor`.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        """Tokenize text queries and/or preprocess (query) images.

        At least one of *text*, *query_images* or *images* must be given.
        Returns a `BatchEncoding` whose keys depend on which inputs were provided.
        """
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            # Merge the per-sample encodings into a single batch for the requested framework.
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        # Forwarded to the image processor; see its documentation.
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        # Forwarded to the image processor; see its documentation.
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        # Forwarded to the image processor; see its documentation.
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        # Forwarded to the tokenizer; see its documentation.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forwarded to the tokenizer; see its documentation.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
298
1
"""Generate every variant of a string with exactly one alphabetic character uppercased."""

# Declared so `from module import *` exposes the underscore-prefixed name.
__all__ = ["__lowerCAmelCase"]


def __lowerCAmelCase(txt: str) -> list:
    """Return one variant of *txt* per alphabetic position, with that character uppercased.

    Non-alphabetic characters produce no variant; the empty string yields [].

    >>> __lowerCAmelCase("abc")
    ['Abc', 'aBc', 'abC']
    """
    # Fix: the parameter was previously named differently from the `txt` the body uses,
    # so every call raised NameError.
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]


if __name__ == "__main__":
    __import__("doctest").testmod()
298
"""Base16 (hex) encoding and decoding per RFC 3548 / RFC 4648."""


def base16_encode(data: bytes) -> str:
    """Encode raw bytes as an uppercase base16 string."""
    # Two uppercase hex digits per byte.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 string back into bytes.

    Raises:
        ValueError: if the input has an odd length or contains characters
            outside the uppercase base16 alphabet.
    """
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


# Backward-compatible alias: the previous revision's second definition under this
# name shadowed the first, leaving only the decoder reachable.
__lowerCAmelCase = base16_decode


if __name__ == "__main__":
    import doctest

    doctest.testmod()
298
1
"""Compute the k-th lexicographic permutation of [0, 1, ..., n - 1]."""


def kth_permutation(k: int, n: int) -> list:
    """Return the k-th (0-indexed) lexicographic permutation of ``range(n)``.

    Uses the factorial number system: each factorial digit of *k* selects the
    next element from the remaining pool.  Requires ``0 <= k < n!`` and
    ``n >= 2`` (for n == 1 the final ``elements[0]`` lookup would fail —
    preserved from the original).
    """
    # factorials = [1!, 2!, ..., (n-1)!]
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Consume factorial digits from the most significant down.
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation


# Backward-compatible alias for the previous (machine-mangled) name, whose
# duplicate parameter names made the original definition a SyntaxError.
__lowerCAmelCase = kth_permutation


if __name__ == "__main__":
    import doctest

    doctest.testmod()
298
'''simple docstring''' import argparse import json import logging import os import sys from unittest.mock import patch from transformers.testing_utils import TestCasePlus, get_gpu_count, slow _lowerCAmelCase = [ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ '''text-classification''', '''language-modeling''', '''summarization''', '''token-classification''', '''question-answering''', ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm_flax import run_flax_glue import run_flax_ner import run_mlm_flax import run_qa import run_summarization_flax import run_ta_mlm_flax logging.basicConfig(level=logging.DEBUG) _lowerCAmelCase = logging.getLogger() def __lowerCAmelCase ( ): __UpperCamelCase : List[Any] = argparse.ArgumentParser() parser.add_argument("-f" ) __UpperCamelCase : Optional[Any] = parser.parse_args() return args.f def __lowerCAmelCase ( snake_case__ , snake_case__="eval" ): __UpperCamelCase : List[str] = os.path.join(snake_case__ , F"{split}_results.json" ) if os.path.exists(snake_case__ ): with open(snake_case__ , "r" ) as f: return json.load(snake_case__ ) raise ValueError(F"can't find {path}" ) _lowerCAmelCase = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def a_ (self ) -> str: __UpperCamelCase : Any = self.get_auto_remove_tmp_dir() __UpperCamelCase : List[str] = f"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split() with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ): run_flax_glue.main() __UpperCamelCase : int = get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) @slow 
def a_ (self ) -> Tuple: __UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir() __UpperCamelCase : Any = f"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split() with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ): run_clm_flax.main() __UpperCamelCase : Optional[int] = get_results(_UpperCAmelCase ) self.assertLess(result["eval_perplexity"] , 1_0_0 ) @slow def a_ (self ) -> str: __UpperCamelCase : Any = self.get_auto_remove_tmp_dir() __UpperCamelCase : Tuple = f"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split() with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ): run_summarization_flax.main() __UpperCamelCase : Tuple = get_results(_UpperCAmelCase , split="test" ) self.assertGreaterEqual(result["test_rouge1"] , 1_0 ) self.assertGreaterEqual(result["test_rouge2"] , 2 ) self.assertGreaterEqual(result["test_rougeL"] , 7 ) self.assertGreaterEqual(result["test_rougeLsum"] , 7 ) @slow def a_ (self ) -> int: __UpperCamelCase : int = self.get_auto_remove_tmp_dir() __UpperCamelCase : str = f"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n 
--overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split() with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ): run_mlm_flax.main() __UpperCamelCase : Optional[Any] = get_results(_UpperCAmelCase ) self.assertLess(result["eval_perplexity"] , 4_2 ) @slow def a_ (self ) -> Dict: __UpperCamelCase : Dict = self.get_auto_remove_tmp_dir() __UpperCamelCase : Tuple = f"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split() with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ): run_ta_mlm_flax.main() __UpperCamelCase : Tuple = get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.42 ) @slow def a_ (self ) -> Union[str, Any]: # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu __UpperCamelCase : Union[str, Any] = 7 if get_gpu_count() > 1 else 2 __UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir() __UpperCamelCase : Optional[Any] = f"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split() with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ): run_flax_ner.main() __UpperCamelCase : int = 
get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) self.assertGreaterEqual(result["eval_f1"] , 0.3 ) @slow def a_ (self ) -> List[Any]: __UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir() __UpperCamelCase : Dict = f"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split() with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ): run_qa.main() __UpperCamelCase : List[Any] = get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_f1"] , 3_0 ) self.assertGreaterEqual(result["eval_exact"] , 3_0 )
298
1
'''simple docstring''' import builtins import sys from ...utils.imports import _is_package_available from . import cursor, input from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor from .keymap import KEYMAP _lowerCAmelCase = False try: _lowerCAmelCase = _is_package_available('''google.colab''') except ModuleNotFoundError: pass @input.register class A : '''simple docstring''' def __init__(self , _UpperCAmelCase = None , _UpperCAmelCase = [] ) -> List[str]: __UpperCamelCase : Optional[int] = 0 __UpperCamelCase : str = choices __UpperCamelCase : Union[str, Any] = prompt if sys.platform == "win32": __UpperCamelCase : Union[str, Any] = "*" else: __UpperCamelCase : Union[str, Any] = "➔ " def a_ (self , _UpperCAmelCase , _UpperCAmelCase = "" ) -> Optional[Any]: if sys.platform != "win32": writeColor(self.choices[index] , 3_2 , _UpperCAmelCase ) else: forceWrite(self.choices[index] , _UpperCAmelCase ) def a_ (self , _UpperCAmelCase ) -> List[Any]: if index == self.position: forceWrite(f" {self.arrow_char} " ) self.write_choice(_UpperCAmelCase ) else: forceWrite(f" {self.choices[index]}" ) reset_cursor() def a_ (self , _UpperCAmelCase , _UpperCAmelCase = 1 ) -> List[str]: __UpperCamelCase : Union[str, Any] = self.position if direction == Direction.DOWN: if self.position + 1 >= len(self.choices ): return self.position += num_spaces else: if self.position - 1 < 0: return self.position -= num_spaces clear_line() self.print_choice(_UpperCAmelCase ) move_cursor(_UpperCAmelCase , direction.name ) self.print_choice(self.position ) @input.mark(KEYMAP["up"] ) def a_ (self ) -> int: self.move_direction(Direction.UP ) @input.mark(KEYMAP["down"] ) def a_ (self ) -> Union[str, Any]: self.move_direction(Direction.DOWN ) @input.mark(KEYMAP["newline"] ) def a_ (self ) -> int: move_cursor(len(self.choices ) - self.position , "DOWN" ) return self.position @input.mark(KEYMAP["interrupt"] ) def a_ (self ) -> Optional[Any]: 
move_cursor(len(self.choices ) - self.position , "DOWN" ) raise KeyboardInterrupt @input.mark_multiple(*[KEYMAP[str(_UpperCAmelCase )] for number in range(1_0 )] ) def a_ (self ) -> Tuple: __UpperCamelCase : Dict = int(chr(self.current_selection ) ) __UpperCamelCase : Any = index - self.position if index == self.position: return if index < len(self.choices ): if self.position > index: self.move_direction(Direction.UP , -movement ) elif self.position < index: self.move_direction(Direction.DOWN , _UpperCAmelCase ) else: return else: return def a_ (self , _UpperCAmelCase = 0 ) -> str: if self.prompt: linebreak() forceWrite(self.prompt , "\n" ) if in_colab: forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" ) else: forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" ) __UpperCamelCase : Optional[int] = default_choice for i in range(len(self.choices ) ): self.print_choice(_UpperCAmelCase ) forceWrite("\n" ) move_cursor(len(self.choices ) - self.position , "UP" ) with cursor.hide(): while True: if in_colab: try: __UpperCamelCase : Optional[Any] = int(builtins.input() ) except ValueError: __UpperCamelCase : str = default_choice else: __UpperCamelCase : int = self.handle_input() if choice is not None: reset_cursor() for _ in range(len(self.choices ) + 1 ): move_cursor(1 , "UP" ) clear_line() self.write_choice(_UpperCAmelCase , "\n" ) return choice
298
'''simple docstring''' import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class A : '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase=9_9 , _UpperCAmelCase=1_3 , _UpperCAmelCase=1_6 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=2 , _UpperCAmelCase=3_2 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase=3_0 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=None , ) -> int: __UpperCamelCase : List[str] = parent __UpperCamelCase : str = batch_size __UpperCamelCase : str = decoder_seq_length # For common tests __UpperCamelCase : Optional[int] = self.decoder_seq_length __UpperCamelCase : Any = is_training __UpperCamelCase : Tuple = use_attention_mask __UpperCamelCase : Optional[int] = use_labels __UpperCamelCase : Dict = vocab_size __UpperCamelCase : Optional[int] = d_model __UpperCamelCase : Union[str, Any] = d_model __UpperCamelCase : int = decoder_layers __UpperCamelCase : Dict = decoder_layers __UpperCamelCase : str = decoder_ffn_dim __UpperCamelCase : Optional[Any] = decoder_attention_heads __UpperCamelCase : Optional[Any] = decoder_attention_heads __UpperCamelCase : List[Any] = eos_token_id __UpperCamelCase : int = bos_token_id __UpperCamelCase : Tuple = pad_token_id __UpperCamelCase : Tuple = decoder_start_token_id __UpperCamelCase : Dict = use_cache __UpperCamelCase : Optional[Any] = max_position_embeddings __UpperCamelCase : int = None 
__UpperCamelCase : Optional[int] = decoder_seq_length __UpperCamelCase : Optional[int] = 2 __UpperCamelCase : Optional[int] = 1 def a_ (self ) -> List[Any]: __UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) __UpperCamelCase : int = None if self.use_attention_mask: __UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 ) __UpperCamelCase : List[str] = None if self.use_labels: __UpperCamelCase : int = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) __UpperCamelCase : Optional[Any] = TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> Optional[Any]: __UpperCamelCase : List[Any] = True __UpperCamelCase : Optional[Any] = TrOCRDecoder(config=_UpperCAmelCase ).to(_UpperCAmelCase ).eval() __UpperCamelCase : Optional[Any] = input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass __UpperCamelCase : str = model(_UpperCAmelCase , use_cache=_UpperCAmelCase ) __UpperCamelCase : List[Any] = model(_UpperCAmelCase ) __UpperCamelCase : Optional[int] = model(_UpperCAmelCase , use_cache=_UpperCAmelCase ) self.parent.assertTrue(len(_UpperCAmelCase ) == len(_UpperCAmelCase ) ) self.parent.assertTrue(len(_UpperCAmelCase ) == len(_UpperCAmelCase ) + 1 ) __UpperCamelCase : List[Any] = outputs["past_key_values"] # create hypothetical next token and extent to next_input_ids __UpperCamelCase : Optional[int] = ids_tensor((2, 1) , 
config.vocab_size - 1 ) + 1 # append to next input_ids and __UpperCamelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 ) __UpperCamelCase : Tuple = model(_UpperCAmelCase )["last_hidden_state"] __UpperCamelCase : Any = model(_UpperCAmelCase , past_key_values=_UpperCAmelCase )["last_hidden_state"] # select random slice __UpperCamelCase : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item() __UpperCamelCase : Dict = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() __UpperCamelCase : Optional[int] = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 ) def a_ (self ) -> Optional[Any]: __UpperCamelCase : List[str] = self.prepare_config_and_inputs() __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Any = config_and_inputs __UpperCamelCase : str = {"input_ids": input_ids, "attention_mask": attention_mask} return config, inputs_dict @require_torch class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): '''simple docstring''' A = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () A = (TrOCRForCausalLM,) if is_torch_available() else () A = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {} A = True A = False def a_ (self ) -> List[str]: __UpperCamelCase : Optional[int] = TrOCRStandaloneDecoderModelTester(self , is_training=_UpperCAmelCase ) __UpperCamelCase : Dict = ConfigTester(self , config_class=_UpperCAmelCase ) def a_ (self ) -> Dict: pass def a_ (self ) -> Optional[int]: pass def a_ (self ) -> Optional[Any]: pass def a_ (self ) -> Dict: self.config_tester.run_common_tests() def a_ (self ) -> List[Any]: __UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*_UpperCAmelCase ) def a_ (self ) -> Any: return @unittest.skip("The model 
doesn't support left padding" ) # and it's not used enough to be worth fixing :) def a_ (self ) -> Tuple: pass
298
1
'''simple docstring''' import argparse import torch from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml parser.add_argument( '''--original_config_file''', default=None, type=str, help='''The YAML config file corresponding to the original architecture.''', ) parser.add_argument( '''--num_in_channels''', default=None, type=int, help='''The number of input channels. If `None` number of input channels will be automatically inferred.''', ) parser.add_argument( '''--scheduler_type''', default='''pndm''', type=str, help='''Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']''', ) parser.add_argument( '''--pipeline_type''', default=None, type=str, help=( '''The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\'''' '''. If `None` pipeline will be automatically inferred.''' ), ) parser.add_argument( '''--image_size''', default=None, type=int, help=( '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2''' ''' Base. Use 768 for Stable Diffusion v2.''' ), ) parser.add_argument( '''--prediction_type''', default=None, type=str, help=( '''The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable''' ''' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.''' ), ) parser.add_argument( '''--extract_ema''', action='''store_true''', help=( '''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights''' ''' or not. Defaults to `False`. 
Add `--extract_ema` to extract the EMA weights. EMA weights usually yield''' ''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.''' ), ) parser.add_argument( '''--upcast_attention''', action='''store_true''', help=( '''Whether the attention computation should always be upcasted. This is necessary when running stable''' ''' diffusion 2.1.''' ), ) parser.add_argument( '''--from_safetensors''', action='''store_true''', help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''', ) parser.add_argument( '''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.''', ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''') parser.add_argument( '''--stable_unclip''', type=str, default=None, required=False, help='''Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.''', ) parser.add_argument( '''--stable_unclip_prior''', type=str, default=None, required=False, help='''Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.''', ) parser.add_argument( '''--clip_stats_path''', type=str, help='''Path to the clip stats file. 
Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.''', required=False, ) parser.add_argument( '''--controlnet''', action='''store_true''', default=None, help='''Set flag if this is a controlnet checkpoint.''' ) parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''') parser.add_argument( '''--vae_path''', type=str, default=None, required=False, help='''Set to a path, hub id to an already converted vae to not convert it again.''', ) _lowerCAmelCase = parser.parse_args() _lowerCAmelCase = download_from_original_stable_diffusion_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, prediction_type=args.prediction_type, model_type=args.pipeline_type, extract_ema=args.extract_ema, scheduler_type=args.scheduler_type, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, stable_unclip=args.stable_unclip, stable_unclip_prior=args.stable_unclip_prior, clip_stats_path=args.clip_stats_path, controlnet=args.controlnet, vae_path=args.vae_path, ) if args.half: pipe.to(torch_dtype=torch.floataa) if args.controlnet: # only save the controlnet model pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) else: pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
298
'''simple docstring''' import argparse from pathlib import Path import fairseq import torch from fairseq.models.xmod import XMODModel as FairseqXmodModel from packaging import version from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse('''0.12.2'''): raise Exception('''requires fairseq >= 0.12.2''') if version.parse(fairseq.__version__) > version.parse('''2'''): raise Exception('''requires fairseq < v2''') logging.set_verbosity_info() _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = '''Hello, World!''' _lowerCAmelCase = '''en_XX''' def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ): __UpperCamelCase : Union[str, Any] = Path("data_bin" ) __UpperCamelCase : Union[str, Any] = FairseqXmodModel.from_pretrained( model_name_or_path=str(Path(snake_case__ ).parent ) , checkpoint_file=Path(snake_case__ ).name , _name="xmod_base" , arch="xmod_base" , task="multilingual_masked_lm" , data_name_or_path=str(snake_case__ ) , bpe="sentencepiece" , sentencepiece_model=str(Path(snake_case__ ).parent / "sentencepiece.bpe.model" ) , src_dict=str(data_dir / "dict.txt" ) , ) xmod.eval() # disable dropout print(snake_case__ ) __UpperCamelCase : List[str] = xmod.model.encoder.sentence_encoder __UpperCamelCase : Optional[int] = XmodConfig( vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , 
ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , ) if classification_head: __UpperCamelCase : Any = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0] print("Our X-MOD config:" , snake_case__ ) __UpperCamelCase : Dict = XmodForSequenceClassification(snake_case__ ) if classification_head else XmodForMaskedLM(snake_case__ ) model.eval() # Now let's copy all the weights. # Embeddings __UpperCamelCase : List[Any] = xmod_sent_encoder.embed_tokens.weight __UpperCamelCase : List[Any] = xmod_sent_encoder.embed_positions.weight __UpperCamelCase : str = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them. __UpperCamelCase : Any = xmod_sent_encoder.layernorm_embedding.weight __UpperCamelCase : str = xmod_sent_encoder.layernorm_embedding.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer __UpperCamelCase : int = model.roberta.encoder.layer[i] __UpperCamelCase : Any = xmod_sent_encoder.layers[i] # self attention __UpperCamelCase : List[str] = layer.attention.self if not ( xmod_layer.self_attn.k_proj.weight.data.shape == xmod_layer.self_attn.q_proj.weight.data.shape == xmod_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ): raise AssertionError("Dimensions of self-attention weights do not match." 
) __UpperCamelCase : Dict = xmod_layer.self_attn.q_proj.weight __UpperCamelCase : Optional[Any] = xmod_layer.self_attn.q_proj.bias __UpperCamelCase : Any = xmod_layer.self_attn.k_proj.weight __UpperCamelCase : Tuple = xmod_layer.self_attn.k_proj.bias __UpperCamelCase : Union[str, Any] = xmod_layer.self_attn.v_proj.weight __UpperCamelCase : Any = xmod_layer.self_attn.v_proj.bias # self-attention output __UpperCamelCase : Optional[int] = layer.attention.output if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape: raise AssertionError("Dimensions of self-attention output weights do not match." ) __UpperCamelCase : Union[str, Any] = xmod_layer.self_attn.out_proj.weight __UpperCamelCase : str = xmod_layer.self_attn.out_proj.bias __UpperCamelCase : Dict = xmod_layer.self_attn_layer_norm.weight __UpperCamelCase : Any = xmod_layer.self_attn_layer_norm.bias # intermediate __UpperCamelCase : Dict = layer.intermediate if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError("Dimensions of intermediate weights do not match." ) __UpperCamelCase : List[Any] = xmod_layer.fca.weight __UpperCamelCase : Optional[int] = xmod_layer.fca.bias # output __UpperCamelCase : List[Any] = layer.output if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError("Dimensions of feed-forward weights do not match." ) __UpperCamelCase : Tuple = xmod_layer.fca.weight __UpperCamelCase : int = xmod_layer.fca.bias __UpperCamelCase : Dict = xmod_layer.final_layer_norm.weight __UpperCamelCase : int = xmod_layer.final_layer_norm.bias if bert_output.adapter_layer_norm is not None: __UpperCamelCase : Any = xmod_layer.adapter_layer_norm.weight __UpperCamelCase : int = xmod_layer.adapter_layer_norm.bias if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ): raise AssertionError("Lists of language adapters do not match." 
) for lang_code, adapter in xmod_layer.adapter_modules.items(): __UpperCamelCase : Any = bert_output.adapter_modules[lang_code] __UpperCamelCase : Dict = xmod_layer.adapter_modules[lang_code] __UpperCamelCase : int = from_adapter.fca.weight __UpperCamelCase : Dict = from_adapter.fca.bias __UpperCamelCase : List[Any] = from_adapter.fca.weight __UpperCamelCase : int = from_adapter.fca.bias # end of layer if xmod_sent_encoder.layer_norm is not None: __UpperCamelCase : Tuple = xmod_sent_encoder.layer_norm.weight __UpperCamelCase : List[Any] = xmod_sent_encoder.layer_norm.bias if classification_head: __UpperCamelCase : Optional[Any] = xmod.model.classification_heads["mnli"].dense.weight __UpperCamelCase : Any = xmod.model.classification_heads["mnli"].dense.bias __UpperCamelCase : Tuple = xmod.model.classification_heads["mnli"].out_proj.weight __UpperCamelCase : List[Any] = xmod.model.classification_heads["mnli"].out_proj.bias else: # LM Head __UpperCamelCase : Any = xmod.model.encoder.lm_head.dense.weight __UpperCamelCase : Optional[Any] = xmod.model.encoder.lm_head.dense.bias __UpperCamelCase : Tuple = xmod.model.encoder.lm_head.layer_norm.weight __UpperCamelCase : List[Any] = xmod.model.encoder.lm_head.layer_norm.bias __UpperCamelCase : Tuple = xmod.model.encoder.lm_head.weight __UpperCamelCase : Any = xmod.model.encoder.lm_head.bias # Let's check that we get the same results. 
__UpperCamelCase : Any = xmod.encode(snake_case__ ).unsqueeze(0 ) # batch of size 1 model.roberta.set_default_language(snake_case__ ) __UpperCamelCase : Optional[Any] = model(snake_case__ )[0] if classification_head: __UpperCamelCase : int = xmod.model.classification_heads["mnli"](xmod.extract_features(snake_case__ ) ) else: __UpperCamelCase : Optional[Any] = xmod.model(snake_case__ , lang_id=[SAMPLE_LANGUAGE] )[0] print(our_output.shape , their_output.shape ) __UpperCamelCase : Dict = torch.max(torch.abs(our_output - their_output ) ).item() print(F"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7 __UpperCamelCase : Union[str, Any] = torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) print("Do both models output the same tensors?" , "🔥" if success else "💩" ) if not success: raise Exception("Something went wRoNg" ) Path(snake_case__ ).mkdir(parents=snake_case__ , exist_ok=snake_case__ ) print(F"Saving model to {pytorch_dump_folder_path}" ) model.save_pretrained(snake_case__ ) if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.''' ) _lowerCAmelCase = parser.parse_args() convert_xmod_checkpoint_to_pytorch( args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
298
1
'''simple docstring''' from functools import lru_cache @lru_cache def __lowerCAmelCase ( snake_case__ ): if num < 0: raise ValueError("Number should not be negative." ) return 1 if num in (0, 1) else num * factorial(num - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
298
'''simple docstring''' def __lowerCAmelCase ( snake_case__ ): return [ txt[:a] + txt[a].upper() + txt[a + 1 :] for a in range(len(snake_case__ ) ) if txt[a].isalpha() ] if __name__ == "__main__": __import__('''doctest''').testmod()
298
1
'''simple docstring''' def __lowerCAmelCase ( snake_case__ = 100 ): __UpperCamelCase : List[str] = 0 __UpperCamelCase : Union[str, Any] = 0 for i in range(1 , n + 1 ): sum_of_squares += i**2 sum_of_ints += i return sum_of_ints**2 - sum_of_squares if __name__ == "__main__": print(f'{solution() = }')
298
"""simple docstring"""


def combination_sum_iv(n, array, target):
    """Count ordered sequences of items from *array* (reuse allowed) summing to *target*.

    Naive exponential recursion. *n* (``len(array)``) is unused here but kept
    for signature parity with the other implementations below.
    """
    # Bug fix: the original ``def`` declared three parameters all named
    # ``snake_case__`` (a SyntaxError); names restored from the call site.

    def count_of_possible_combinations(target):
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n, array, target):
    """Same count as :func:`combination_sum_iv`, memoised top-down in ``dp_array``."""

    def count_of_possible_combinations_with_dp_array(target, dp_array):
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:  # already computed
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n, array, target):
    """Same count, iterative bottom-up DP; *n* must equal ``len(array)``."""
    # dp_array[i] = number of ordered combinations summing to i.
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
298
1
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


_lowerCAmelCase = logging.get_logger(__name__)

_lowerCAmelCase = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class A(PretrainedConfig):
    """Configuration class for a LeViT model.

    Bug fix: the original ``__init__`` declared every parameter with the same
    name ``_UpperCAmelCase`` (a SyntaxError) and assigned every value to a
    throwaway local instead of ``self``; parameter names were restored from the
    assignment order and defaults, and values are now stored on the instance.
    """

    # model identifier used by the config machinery
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        # downsampling ("Subsample") ops between the three stages
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class A(OnnxConfig):
    """ONNX export configuration for LeViT (shadows the config class above,
    as in the original module layout)."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single 4D image input with fully dynamic axes.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating the exported ONNX graph.
        return 1e-4
298
"""Pytest configuration shared by the test suite (restored).

Fixes: the repo-path constant was assigned to ``_lowerCAmelCase`` but used as
``git_repo_path`` (NameError at import time); both hooks were named
``__lowerCAmelCase`` (the second definition shadowed the first); and the
second hook's body referenced an undefined ``terminalreporter`` and passed
the reporter, not the report id, as ``id=``.
"""
import sys
import warnings
from os.path import abspath, dirname, join

# Allow having multiple repository checkouts and not needing to remember to
# rerun 'pip install -e .[dev]' when switching between checkouts and running
# tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# Silence FutureWarning warnings in tests since often we can't act on them
# until they become normal warnings - i.e. the tests still need to test the
# current functionality.
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    """Register the shared transformers test-suite CLI options."""
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """Emit the optional test reports when ``--make-reports`` is passed."""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
298
1
"""XLM-ProphetNet SentencePiece tokenizer (restored from a collapsed source).

Fixes in this block:
- duplicate ``_UpperCAmelCase`` parameter names (SyntaxErrors in ``__init__``
  and several methods),
- every method was named ``a_`` so later definitions shadowed earlier ones;
  the canonical ``PreTrainedTokenizer`` method names are restored,
- ``convert_tokens_to_string`` called ``replace(tokens, " ")`` (a TypeError)
  instead of replacing the SentencePiece underline marker,
- module constants were all rebound to ``_lowerCAmelCase`` while the class
  referenced the original constant names (NameErrors).
"""
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}


def load_vocab(vocab_file):
    """Load a vocabulary file into an ordered token -> index dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        vocab[token.rstrip("\n")] = index
    return vocab


class XLMProphetNetTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for XLM-ProphetNet.

    The SentencePiece ids are shifted by ``fairseq_offset`` so that the
    fairseq special tokens and ten ``[unused*]`` slots occupy ids 0-14.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="[SEP]",
        eos_token="[SEP]",
        sep_token="[SEP]",
        unk_token="[UNK]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
        for i in range(10):
            self.fairseq_tokens_to_ids[f"[unused{i}]"] = 5 + i
        # The first "real" token "," has position 15 in the embedding vocab
        # and position 3 in the spm vocab.
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload in
        # __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """XLM-ProphetNet does not use token type ids; return all zeros."""
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token to an id, honoring the fairseq special tokens."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Convert an id back to a token, honoring the fairseq special tokens."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        # Bug fix: the collapsed source replaced ``tokens`` (a list) instead
        # of the SentencePiece underline marker, which raised a TypeError.
        return "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                fi.write(self.sp_model.serialized_model_proto())
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Append [SEP] after each sequence: ``X [SEP]`` or ``A [SEP] B [SEP]``."""
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
298
"""Tests for BridgeTowerImageProcessor (restored from a collapsed source).

Fixes: duplicate ``_UpperCAmelCase`` parameter names in ``__init__`` (a
SyntaxError), all methods collapsed to ``a_`` (mutual shadowing), and the
tester class being named ``A`` while instantiated as
``BridgeTowerImageProcessingTester`` (NameError).
"""
import unittest
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs

if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import BridgeTowerImageProcessor


class BridgeTowerImageProcessingTester(unittest.TestCase):
    """Holds processor kwargs and computes expected output shapes."""

    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor is expected to produce.

        Mirrors the resize logic: scale the shorter edge to ``size``, cap the
        longer edge at ``1333/800 * size``, then round both dimensions down
        to a multiple of ``size_divisor``. In batched mode, returns the max
        over per-image expectations (images are padded to a common size).
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width


@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
298
1
"""Tests for agent I/O types (restored from a collapsed source).

Fixes: the temp-path helper was defined as ``__lowerCAmelCase`` but called as
``get_new_path`` (NameError), ``uuid.uuida()`` was a mangling of
``uuid.uuid4()``, and every test method was named ``a_`` so later definitions
shadowed earlier ones; descriptive test names are restored.
"""
import os
import tempfile
import unittest
import uuid
from pathlib import Path

from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available

if is_torch_available():
    import torch

if is_soundfile_availble():
    import soundfile as sf

if is_vision_available():
    from PIL import Image


def get_new_path(suffix=""):
    """Return a unique file path inside a fresh temporary directory."""
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)


@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16000)

        agent_type = AgentAudio(path)
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)


@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)

        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)

        # A fresh file is created for PIL inputs, so paths must differ.
        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))


class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)

        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
298
"""Convert a Bort (gluonnlp/mxnet) checkpoint to PyTorch (restored).

Fixes: the conversion function took two parameters that were both named
``snake_case__`` (a SyntaxError) and was defined as ``__lowerCAmelCase``
while the ``__main__`` block called ``convert_bort_checkpoint_to_pytorch``
(NameError); the several boolean/None arguments mangled to ``snake_case__``
are restored from the upstream conversion script.
"""
import argparse
import os

import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn

from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.utils import logging

if version.parse(nlp.__version__) != version.parse("0.8.3"):
    raise Exception("requires gluonnlp == 0.8.3")

if version.parse(mx.__version__) != version.parse("1.5.0"):
    raise Exception("requires mxnet == 1.5.0")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"


def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Convert the original Bort checkpoint into a HF-compatible BERT model.

    Loads the gluonnlp checkpoint, copies every parameter into a fresh
    ``BertForMaskedLM`` (with shape checks), saves it, then compares the two
    models' outputs on a sample sentence.
    """
    # Hyper-parameters of the released bort_4_8_768_1024 checkpoint.
    predefined_args = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()

    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()

    # Parameter names map 1:1 between the Gluon encoder and the HF BERT
    # modules (see the original conversion script for the full table):
    # e.g. encoder.transformer_cells.*.attention_cell.proj_key.weight
    #      -> bert.encoder.layer.*.attention.self.key.weight

    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )

    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )

    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both model do output the same tensors")
    else:
        print("❌ Both model do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
298
1
"""Lazy import structure for the Bloom model (restored).

Fixes: the tokenizer and modeling entries rebound the whole
``_lowerCAmelCase`` name (clobbering the import-structure dict) instead of
adding keys to it, and the final ``_LazyModule(...)`` call referenced
``_import_structure``, which was never defined under that name.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Map of submodule name -> public names it provides; consumed by _LazyModule
# so heavy submodules are only imported on first attribute access.
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

# Fast tokenizer is only exposed when the tokenizers backend is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]

# Modeling classes are only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    # Static imports so type checkers see the real symbols.
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy at runtime.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
298
"""Tests for `datasets.BeamBasedBuilder` using the Apache Beam DirectRunner."""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import datasets
import datasets.config

from .utils import require_beam


class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Beam builder emitting flat `{"content": str}` examples."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Beam builder emitting nested `{"a": {"b": [str]}}` examples."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    """Return (key, example) pairs for the flat dummy dataset."""
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    """Return (key, example) pairs for the nested dummy dataset."""
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        # Keep a handle on the real writer so the mock can delegate with a forced shard count.
        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        # Without a beam_runner, preparation must fail fast with MissingBeamOptions.
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
298
1
"""All-pairs shortest paths on a weighted directed graph via Floyd-Warshall."""
import math


class Graph:
    """Directed graph over nodes 0..n-1 with O(n^3) all-pairs shortest paths."""

    def __init__(self, n=0):
        self.n = n
        # Adjacency matrix of raw edge weights (math.inf = no edge).
        self.w = [[math.inf for _ in range(n)] for _ in range(n)]
        # dp[i][j] holds the current best-known distance from i to j;
        # seeded with direct edges by add_edge, refined by floyd_warshall.
        self.dp = [[math.inf for _ in range(n)] for _ in range(n)]
        for i in range(n):
            self.dp[i][i] = 0  # distance from a node to itself is zero

    def add_edge(self, u, v, w):
        """Add a directed edge u -> v with weight w."""
        self.w[u][v] = w
        self.dp[u][v] = w

    def floyd_warshall(self):
        """Relax all pairs through every intermediate node k."""
        for k in range(self.n):
            for i in range(self.n):
                for j in range(self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        """Return the shortest distance from u to v (math.inf if unreachable)."""
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
298
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

from accelerate.test_utils import execute_subprocess_async


def test_command_parser(subparsers=None):
    """Build the `accelerate test` argument parser.

    When `subparsers` is given, registers "test" as a subcommand of it;
    otherwise builds a standalone parser. Returns the parser.
    """
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        # As a subcommand, dispatch to test_command when invoked.
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    """Run the bundled sanity-check script under `accelerate-launch`."""
    # Resolve <package_root>/test_utils/scripts/test_script.py relative to this file.
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    """Entry point for running the test command standalone."""
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
298
1
"""Tests for the Pix2Struct image processor (3-channel and 4-channel inputs)."""
import unittest

import numpy as np
import requests

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

if is_vision_available():
    from PIL import Image

    from transformers import PixaStructImageProcessor


class PixaStructImageProcessingTester(unittest.TestCase):
    """Builds processor kwargs and dummy inputs for the image-processor tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        # Patch budgets exercised by each test.
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class PixaStructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        # Reference mean computed from the known dummy image.
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Flattened patch dim = patch_h * patch_w * channels, plus 2 positional columns.
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # In VQA mode a header_text is mandatory; calling without it must raise.
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class PixaStructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self, num_channels=4)
        # RGBA input is converted down to 3 channels by the processor.
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # One channel is dropped by the RGB conversion, hence num_channels - 1.
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
298
"""Tests for the BlenderbotSmall tokenizer."""
import json
import os
import unittest

from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
    VOCAB_FILES_NAMES,
    BlenderbotSmallTokenizer,
)

from ...test_tokenization_common import TokenizerTesterMixin


class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Tiny fixture vocab/merges written to the mixin-provided tmpdir.
        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        # A trailing period must tokenize the same standalone or at end of sentence.
        assert encoded[-1] == encoded_dot[0]
298
1
"""Fast tests for the DeepFloyd IF inpainting pipeline."""
import random
import unittest

import torch

from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    # Inpainting controls output size via the input image, not width/height.
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        # MPS does not support device-bound generators.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
298
'''simple docstring''' from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig _lowerCAmelCase = logging.get_logger(__name__) # General docstring _lowerCAmelCase = '''RegNetConfig''' # Base docstring _lowerCAmelCase = '''facebook/regnet-y-040''' _lowerCAmelCase = [1, 1088, 7, 7] # Image classification docstring _lowerCAmelCase = '''facebook/regnet-y-040''' _lowerCAmelCase = '''tabby, tabby cat''' _lowerCAmelCase = [ '''facebook/regnet-y-040''', # See all regnet models at https://huggingface.co/models?filter=regnet ] class A ( tf.keras.layers.Layer ): '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase = 3 , _UpperCAmelCase = 1 , _UpperCAmelCase = 1 , _UpperCAmelCase = "relu" , **_UpperCAmelCase , ) -> Optional[int]: super().__init__(**_UpperCAmelCase ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb __UpperCamelCase : List[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) __UpperCamelCase : Tuple = tf.keras.layers.ConvaD( filters=_UpperCAmelCase , kernel_size=_UpperCAmelCase , strides=_UpperCAmelCase , padding="VALID" , groups=_UpperCAmelCase , use_bias=_UpperCAmelCase , name="convolution" , ) __UpperCamelCase : int = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" ) __UpperCamelCase : List[str] = ACTaFN[activation] if activation is not None else tf.identity def a_ (self 
, _UpperCAmelCase ) -> Dict: __UpperCamelCase : str = self.convolution(self.padding(_UpperCAmelCase ) ) __UpperCamelCase : Dict = self.normalization(_UpperCAmelCase ) __UpperCamelCase : Dict = self.activation(_UpperCAmelCase ) return hidden_state class A ( tf.keras.layers.Layer ): '''simple docstring''' def __init__(self , _UpperCAmelCase , **_UpperCAmelCase ) -> Optional[Any]: super().__init__(**_UpperCAmelCase ) __UpperCamelCase : Any = config.num_channels __UpperCamelCase : str = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , ) def a_ (self , _UpperCAmelCase ) -> Tuple: __UpperCamelCase : Dict = shape_list(_UpperCAmelCase )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) __UpperCamelCase : Any = tf.transpose(_UpperCAmelCase , perm=(0, 2, 3, 1) ) __UpperCamelCase : List[Any] = self.embedder(_UpperCAmelCase ) return hidden_state class A ( tf.keras.layers.Layer ): '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase = 2 , **_UpperCAmelCase ) -> Any: super().__init__(**_UpperCAmelCase ) __UpperCamelCase : Any = tf.keras.layers.ConvaD( filters=_UpperCAmelCase , kernel_size=1 , strides=_UpperCAmelCase , use_bias=_UpperCAmelCase , name="convolution" ) __UpperCamelCase : Tuple = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase = False ) -> tf.Tensor: return self.normalization(self.convolution(_UpperCAmelCase ) , training=_UpperCAmelCase ) class A ( tf.keras.layers.Layer ): '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) -> Any: super().__init__(**_UpperCAmelCase ) __UpperCamelCase : List[str] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_UpperCAmelCase , name="pooler" ) __UpperCamelCase : Optional[Any] = [ tf.keras.layers.ConvaD(filters=_UpperCAmelCase , kernel_size=1 , activation="relu" , name="attention.0" ), tf.keras.layers.ConvaD(filters=_UpperCAmelCase , kernel_size=1 , activation="sigmoid" , name="attention.2" ), ] def a_ (self , _UpperCAmelCase ) -> Tuple: # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] __UpperCamelCase : List[str] = self.pooler(_UpperCAmelCase ) for layer_module in self.attention: __UpperCamelCase : str = layer_module(_UpperCAmelCase ) __UpperCamelCase : List[Any] = hidden_state * pooled return hidden_state class A ( tf.keras.layers.Layer ): '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1 , **_UpperCAmelCase ) -> int: super().__init__(**_UpperCAmelCase ) __UpperCamelCase : List[Any] = 
in_channels != out_channels or stride != 1 __UpperCamelCase : List[str] = max(1 , out_channels // config.groups_width ) __UpperCamelCase : List[Any] = ( TFRegNetShortCut(_UpperCAmelCase , stride=_UpperCAmelCase , name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" , name="shortcut" ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. __UpperCamelCase : Optional[Any] = [ TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ), TFRegNetConvLayer( _UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , activation=config.hidden_act , name="layer.1" ), TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase , name="layer.2" ), ] __UpperCamelCase : Dict = ACTaFN[config.hidden_act] def a_ (self , _UpperCAmelCase ) -> Union[str, Any]: __UpperCamelCase : List[Any] = hidden_state for layer_module in self.layers: __UpperCamelCase : Dict = layer_module(_UpperCAmelCase ) __UpperCamelCase : List[Any] = self.shortcut(_UpperCAmelCase ) hidden_state += residual __UpperCamelCase : Tuple = self.activation(_UpperCAmelCase ) return hidden_state class A ( tf.keras.layers.Layer ): '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1 , **_UpperCAmelCase ) -> Any: super().__init__(**_UpperCAmelCase ) __UpperCamelCase : str = in_channels != out_channels or stride != 1 __UpperCamelCase : Optional[int] = max(1 , out_channels // config.groups_width ) __UpperCamelCase : Union[str, Any] = ( TFRegNetShortCut(_UpperCAmelCase , stride=_UpperCAmelCase , name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" , name="shortcut" ) ) __UpperCamelCase : Union[str, Any] = [ TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ), TFRegNetConvLayer( _UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , 
activation=config.hidden_act , name="layer.1" ), TFRegNetSELayer(_UpperCAmelCase , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ), TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase , name="layer.3" ), ] __UpperCamelCase : Union[str, Any] = ACTaFN[config.hidden_act] def a_ (self , _UpperCAmelCase ) -> int: __UpperCamelCase : str = hidden_state for layer_module in self.layers: __UpperCamelCase : Any = layer_module(_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = self.shortcut(_UpperCAmelCase ) hidden_state += residual __UpperCamelCase : Union[str, Any] = self.activation(_UpperCAmelCase ) return hidden_state class A ( tf.keras.layers.Layer ): '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , **_UpperCAmelCase ) -> int: super().__init__(**_UpperCAmelCase ) __UpperCamelCase : List[str] = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer __UpperCamelCase : Tuple = [ # downsampling is done in the first layer with stride of 2 layer(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , name="layers.0" ), *[layer(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , name=f"layers.{i+1}" ) for i in range(depth - 1 )], ] def a_ (self , _UpperCAmelCase ) -> Any: for layer_module in self.layers: __UpperCamelCase : Dict = layer_module(_UpperCAmelCase ) return hidden_state class A ( tf.keras.layers.Layer ): '''simple docstring''' def __init__(self , _UpperCAmelCase , **_UpperCAmelCase ) -> str: super().__init__(**_UpperCAmelCase ) __UpperCamelCase : Dict = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( _UpperCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) ) __UpperCamelCase : Union[str, Any] = 
zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(_UpperCAmelCase , config.depths[1:] ) ): self.stages.append(TFRegNetStage(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , depth=_UpperCAmelCase , name=f"stages.{i+1}" ) ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase = False , _UpperCAmelCase = True ) -> TFBaseModelOutputWithNoAttention: __UpperCamelCase : List[Any] = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: __UpperCamelCase : Any = hidden_states + (hidden_state,) __UpperCamelCase : Any = stage_module(_UpperCAmelCase ) if output_hidden_states: __UpperCamelCase : List[Any] = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=_UpperCAmelCase , hidden_states=_UpperCAmelCase ) @keras_serializable class A ( tf.keras.layers.Layer ): '''simple docstring''' A = RegNetConfig def __init__(self , _UpperCAmelCase , **_UpperCAmelCase ) -> List[Any]: super().__init__(**_UpperCAmelCase ) __UpperCamelCase : Optional[int] = config __UpperCamelCase : List[Any] = TFRegNetEmbeddings(_UpperCAmelCase , name="embedder" ) __UpperCamelCase : Union[str, Any] = TFRegNetEncoder(_UpperCAmelCase , name="encoder" ) __UpperCamelCase : Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_UpperCAmelCase , name="pooler" ) @unpack_inputs def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention: __UpperCamelCase : Optional[int] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __UpperCamelCase : Dict = return_dict if return_dict is not None else self.config.use_return_dict __UpperCamelCase : Union[str, Any] = self.embedder(_UpperCAmelCase , training=_UpperCAmelCase ) __UpperCamelCase : str 
= self.encoder( _UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase , training=_UpperCAmelCase ) __UpperCamelCase : List[str] = encoder_outputs[0] __UpperCamelCase : Tuple = self.pooler(_UpperCAmelCase ) # Change to NCHW output format have uniformity in the modules __UpperCamelCase : List[str] = tf.transpose(_UpperCAmelCase , perm=(0, 3, 1, 2) ) __UpperCamelCase : List[Any] = tf.transpose(_UpperCAmelCase , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: __UpperCamelCase : List[str] = tuple([tf.transpose(_UpperCAmelCase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=_UpperCAmelCase , pooler_output=_UpperCAmelCase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' A = RegNetConfig A = "regnet" A = "pixel_values" @property def a_ (self ) -> List[Any]: return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )} _lowerCAmelCase = R''' Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. ''' _lowerCAmelCase = R''' Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. 
See [`ConveNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. ''' @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." , SCREAMING_SNAKE_CASE__ , ) class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__(self , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) -> Tuple: super().__init__(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = TFRegNetMainLayer(_UpperCAmelCase , name="regnet" ) @unpack_inputs @add_start_docstrings_to_model_forward(_UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]: __UpperCamelCase : List[str] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __UpperCamelCase : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict __UpperCamelCase : Tuple = self.regnet( pixel_values=_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase , training=_UpperCAmelCase , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , SCREAMING_SNAKE_CASE__ , ) class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__(self , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) -> int: super().__init__(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = config.num_labels __UpperCamelCase : Any = TFRegNetMainLayer(_UpperCAmelCase , name="regnet" ) # classification head __UpperCamelCase : List[str] = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(_UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def a_ (self , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: __UpperCamelCase : Dict = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __UpperCamelCase : str = return_dict if return_dict is not None else self.config.use_return_dict __UpperCamelCase : Dict = self.regnet( _UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase , training=_UpperCAmelCase ) __UpperCamelCase : Union[str, Any] = outputs.pooler_output if return_dict else outputs[1] __UpperCamelCase : List[str] = self.classifier[0](_UpperCAmelCase ) __UpperCamelCase : Optional[int] = self.classifier[1](_UpperCAmelCase ) __UpperCamelCase : str = None if labels is None else self.hf_compute_loss(labels=_UpperCAmelCase , logits=_UpperCAmelCase ) if not return_dict: __UpperCamelCase : Union[str, Any] = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=_UpperCAmelCase , 
logits=_UpperCAmelCase , hidden_states=outputs.hidden_states )
298
1
'''simple docstring''' import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ): '''simple docstring''' A = BlenderbotSmallTokenizer A = False def a_ (self ) -> List[str]: super().setUp() __UpperCamelCase : Optional[Any] = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"] __UpperCamelCase : int = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) __UpperCamelCase : Any = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""] __UpperCamelCase : int = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"} __UpperCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) __UpperCamelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(_UpperCAmelCase ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(_UpperCAmelCase ) ) def a_ (self , **_UpperCAmelCase ) -> Dict: kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def a_ (self , _UpperCAmelCase ) -> str: __UpperCamelCase : List[Any] = "adapt act apte" __UpperCamelCase : Dict = "adapt act apte" return input_text, output_text def a_ (self ) -> int: __UpperCamelCase : List[str] = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) __UpperCamelCase : str = "adapt act apte" __UpperCamelCase : List[str] = ["adapt", "act", "ap@@", "te"] __UpperCamelCase : Union[str, Any] = tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) __UpperCamelCase : Dict = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] 
__UpperCamelCase : Any = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase ) def a_ (self ) -> int: __UpperCamelCase : Optional[int] = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" ) assert tok("sam" ).input_ids == [1_3_8_4] __UpperCamelCase : Dict = "I am a small frog." __UpperCamelCase : Any = tok([src_text] , padding=_UpperCAmelCase , truncation=_UpperCAmelCase )["input_ids"] __UpperCamelCase : Optional[Any] = tok.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def a_ (self ) -> List[Any]: __UpperCamelCase : Dict = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" ) __UpperCamelCase : Tuple = "I am a small frog ." __UpperCamelCase : List[str] = "." __UpperCamelCase : Any = tok(_UpperCAmelCase )["input_ids"] __UpperCamelCase : Optional[Any] = tok(_UpperCAmelCase )["input_ids"] assert encoded[-1] == encoded_dot[0]
298
'''simple docstring''' import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEmbeddings, BertLayer, BertPooler, BertPreTrainedModel, ) def __lowerCAmelCase ( snake_case__ ): __UpperCamelCase : Tuple = torch.exp(snake_case__ ) __UpperCamelCase : str = torch.sum(snake_case__ , dim=1 ) # sum of exp(x_i) __UpperCamelCase : int = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i) return torch.log(snake_case__ ) - B / A class A ( nn.Module ): '''simple docstring''' def __init__(self , _UpperCAmelCase ) -> Union[str, Any]: super().__init__() __UpperCamelCase : Any = config.output_attentions __UpperCamelCase : Dict = config.output_hidden_states __UpperCamelCase : Union[str, Any] = nn.ModuleList([BertLayer(_UpperCAmelCase ) for _ in range(config.num_hidden_layers )] ) __UpperCamelCase : Tuple = nn.ModuleList([BertHighway(_UpperCAmelCase ) for _ in range(config.num_hidden_layers )] ) __UpperCamelCase : Optional[int] = [-1 for _ in range(config.num_hidden_layers )] def a_ (self , _UpperCAmelCase ) -> int: if (type(_UpperCAmelCase ) is float) or (type(_UpperCAmelCase ) is int): for i in range(len(self.early_exit_entropy ) ): __UpperCamelCase : str = x else: __UpperCamelCase : List[Any] = x def a_ (self , _UpperCAmelCase ) -> str: __UpperCamelCase : Tuple = pooler.state_dict() for highway in self.highway: for name, param in highway.pooler.state_dict().items(): param.copy_(loaded_model[name] ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ) -> List[Any]: __UpperCamelCase : Optional[Any] = () __UpperCamelCase : Tuple = () __UpperCamelCase : Dict = () for i, layer_module in enumerate(self.layer ): if self.output_hidden_states: __UpperCamelCase : Tuple = all_hidden_states + 
(hidden_states,) __UpperCamelCase : Optional[int] = layer_module( _UpperCAmelCase , _UpperCAmelCase , head_mask[i] , _UpperCAmelCase , _UpperCAmelCase ) __UpperCamelCase : Tuple = layer_outputs[0] if self.output_attentions: __UpperCamelCase : Optional[Any] = all_attentions + (layer_outputs[1],) __UpperCamelCase : Any = (hidden_states,) if self.output_hidden_states: __UpperCamelCase : Any = current_outputs + (all_hidden_states,) if self.output_attentions: __UpperCamelCase : int = current_outputs + (all_attentions,) __UpperCamelCase : Optional[int] = self.highway[i](_UpperCAmelCase ) # logits, pooled_output if not self.training: __UpperCamelCase : Dict = highway_exit[0] __UpperCamelCase : Any = entropy(_UpperCAmelCase ) __UpperCamelCase : str = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy __UpperCamelCase : Optional[Any] = all_highway_exits + (highway_exit,) if highway_entropy < self.early_exit_entropy[i]: __UpperCamelCase : str = (highway_logits,) + current_outputs[1:] + (all_highway_exits,) raise HighwayException(_UpperCAmelCase , i + 1 ) else: __UpperCamelCase : Optional[int] = all_highway_exits + (highway_exit,) # Add last layer if self.output_hidden_states: __UpperCamelCase : int = all_hidden_states + (hidden_states,) __UpperCamelCase : Dict = (hidden_states,) if self.output_hidden_states: __UpperCamelCase : Union[str, Any] = outputs + (all_hidden_states,) if self.output_attentions: __UpperCamelCase : Optional[int] = outputs + (all_attentions,) __UpperCamelCase : List[Any] = outputs + (all_highway_exits,) return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits @add_start_docstrings( "The Bert Model transformer with early exiting (DeeBERT). 
" , SCREAMING_SNAKE_CASE__ , ) class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__(self , _UpperCAmelCase ) -> Dict: super().__init__(_UpperCAmelCase ) __UpperCamelCase : Union[str, Any] = config __UpperCamelCase : Dict = BertEmbeddings(_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = DeeBertEncoder(_UpperCAmelCase ) __UpperCamelCase : str = BertPooler(_UpperCAmelCase ) self.init_weights() def a_ (self ) -> Any: self.encoder.init_highway_pooler(self.pooler ) def a_ (self ) -> Optional[int]: return self.embeddings.word_embeddings def a_ (self , _UpperCAmelCase ) -> Dict: __UpperCamelCase : int = value def a_ (self , _UpperCAmelCase ) -> Tuple: for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(_UpperCAmelCase ) @add_start_docstrings_to_model_forward(_UpperCAmelCase ) def a_ (self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ) -> Union[str, Any]: if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" ) elif input_ids is not None: __UpperCamelCase : Tuple = input_ids.size() elif inputs_embeds is not None: __UpperCamelCase : Optional[int] = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds" ) __UpperCamelCase : List[str] = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: __UpperCamelCase : int = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase ) if encoder_attention_mask is None: __UpperCamelCase : Tuple = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase ) if token_type_ids is None: __UpperCamelCase : Optional[Any] = torch.zeros(_UpperCAmelCase , dtype=torch.long , device=_UpperCAmelCase ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, 
to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. __UpperCamelCase : torch.Tensor = self.get_extended_attention_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_attention_mask.dim() == 3: __UpperCamelCase : Tuple = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.dim() == 2: __UpperCamelCase : Any = encoder_attention_mask[:, None, None, :] __UpperCamelCase : List[Any] = encoder_extended_attention_mask.to( dtype=next(self.parameters() ).dtype ) # fp16 compatibility __UpperCamelCase : Dict = (1.0 - encoder_extended_attention_mask) * -10_000.0 # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] __UpperCamelCase : Dict = self.get_head_mask(_UpperCAmelCase , self.config.num_hidden_layers ) __UpperCamelCase : Optional[int] = self.embeddings( input_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase ) __UpperCamelCase : List[Any] = self.encoder( _UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , ) __UpperCamelCase : Union[str, Any] = encoder_outputs[0] __UpperCamelCase : Any = self.pooler(_UpperCAmelCase ) __UpperCamelCase : Union[str, Any] = ( sequence_output, pooled_output, ) + encoder_outputs[ 1: ] # add hidden_states and attentions if they are here return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__(self , 
_UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]: __UpperCamelCase : Tuple = message __UpperCamelCase : Union[str, Any] = exit_layer # start from 1! class A ( nn.Module ): '''simple docstring''' def __init__(self , _UpperCAmelCase ) -> Dict: super().__init__() __UpperCamelCase : Union[str, Any] = BertPooler(_UpperCAmelCase ) __UpperCamelCase : int = nn.Dropout(config.hidden_dropout_prob ) __UpperCamelCase : Union[str, Any] = nn.Linear(config.hidden_size , config.num_labels ) def a_ (self , _UpperCAmelCase ) -> Any: # Pooler __UpperCamelCase : Optional[int] = encoder_outputs[0] __UpperCamelCase : str = self.pooler(_UpperCAmelCase ) # "return" pooler_output # BertModel __UpperCamelCase : Tuple = (pooler_input, pooler_output) + encoder_outputs[1:] # "return" bmodel_output # Dropout and classification __UpperCamelCase : Dict = bmodel_output[1] __UpperCamelCase : List[Any] = self.dropout(_UpperCAmelCase ) __UpperCamelCase : Any = self.classifier(_UpperCAmelCase ) return logits, pooled_output @add_start_docstrings( "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. 
" , SCREAMING_SNAKE_CASE__ , ) class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__(self , _UpperCAmelCase ) -> Any: super().__init__(_UpperCAmelCase ) __UpperCamelCase : List[Any] = config.num_labels __UpperCamelCase : List[Any] = config.num_hidden_layers __UpperCamelCase : Optional[int] = DeeBertModel(_UpperCAmelCase ) __UpperCamelCase : List[str] = nn.Dropout(config.hidden_dropout_prob ) __UpperCamelCase : str = nn.Linear(config.hidden_size , self.config.num_labels ) self.init_weights() @add_start_docstrings_to_model_forward(_UpperCAmelCase ) def a_ (self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=-1 , _UpperCAmelCase=False , ) -> int: __UpperCamelCase : int = self.num_layers try: __UpperCamelCase : Tuple = self.bert( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , head_mask=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase , ) # sequence_output, pooled_output, (hidden_states), (attentions), highway exits __UpperCamelCase : str = outputs[1] __UpperCamelCase : List[Any] = self.dropout(_UpperCAmelCase ) __UpperCamelCase : Dict = self.classifier(_UpperCAmelCase ) __UpperCamelCase : Tuple = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: __UpperCamelCase : int = e.message __UpperCamelCase : Optional[Any] = e.exit_layer __UpperCamelCase : Optional[int] = outputs[0] if not self.training: __UpperCamelCase : Optional[int] = entropy(_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = [] __UpperCamelCase : Any = [] if labels is not None: if self.num_labels == 1: # We are doing regression __UpperCamelCase : List[str] = MSELoss() __UpperCamelCase : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: __UpperCamelCase : Dict = CrossEntropyLoss() __UpperCamelCase : Any = loss_fct(logits.view(-1 
, self.num_labels ) , labels.view(-1 ) ) # work with highway exits __UpperCamelCase : List[Any] = [] for highway_exit in outputs[-1]: __UpperCamelCase : Union[str, Any] = highway_exit[0] if not self.training: highway_logits_all.append(_UpperCAmelCase ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression __UpperCamelCase : Union[str, Any] = MSELoss() __UpperCamelCase : str = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: __UpperCamelCase : Optional[Any] = CrossEntropyLoss() __UpperCamelCase : List[str] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(_UpperCAmelCase ) if train_highway: __UpperCamelCase : int = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: __UpperCamelCase : Dict = (loss,) + outputs if not self.training: __UpperCamelCase : Optional[int] = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: __UpperCamelCase : int = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
298
1
'''simple docstring''' import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class A ( unittest.TestCase ): '''simple docstring''' def a_ (self , _UpperCAmelCase ) -> Optional[Any]: __UpperCamelCase : Union[str, Any] = 3 __UpperCamelCase : Tuple = 2_5_0 __UpperCamelCase : Optional[int] = ids_tensor((batch_size, length) , _UpperCAmelCase ) __UpperCamelCase : Dict = torch.ones((batch_size, length) , device=_UpperCAmelCase , dtype=torch.float ) / length return input_ids, scores def a_ (self ) -> List[Any]: __UpperCamelCase , __UpperCamelCase : List[str] = self._get_tensors(5 ) __UpperCamelCase : Dict = StoppingCriteriaList( [ MaxLengthCriteria(max_length=1_0 ), MaxTimeCriteria(max_time=0.1 ), ] ) self.assertFalse(criteria(_UpperCAmelCase , _UpperCAmelCase ) ) __UpperCamelCase , __UpperCamelCase : Optional[Any] = self._get_tensors(9 ) self.assertFalse(criteria(_UpperCAmelCase , _UpperCAmelCase ) ) __UpperCamelCase , __UpperCamelCase : Optional[int] = self._get_tensors(1_0 ) self.assertTrue(criteria(_UpperCAmelCase , _UpperCAmelCase ) ) def a_ (self ) -> Optional[Any]: __UpperCamelCase : List[str] = MaxLengthCriteria(max_length=1_0 ) __UpperCamelCase , __UpperCamelCase : Union[str, Any] = self._get_tensors(5 ) self.assertFalse(criteria(_UpperCAmelCase , _UpperCAmelCase ) ) __UpperCamelCase , __UpperCamelCase : Optional[Any] = self._get_tensors(9 ) self.assertFalse(criteria(_UpperCAmelCase , _UpperCAmelCase ) ) __UpperCamelCase , __UpperCamelCase : Union[str, Any] = self._get_tensors(1_0 ) self.assertTrue(criteria(_UpperCAmelCase , _UpperCAmelCase ) ) def a_ (self ) -> int: __UpperCamelCase : List[str] = MaxNewTokensCriteria(start_length=5 , 
max_new_tokens=5 ) __UpperCamelCase , __UpperCamelCase : Optional[Any] = self._get_tensors(5 ) self.assertFalse(criteria(_UpperCAmelCase , _UpperCAmelCase ) ) __UpperCamelCase , __UpperCamelCase : List[str] = self._get_tensors(9 ) self.assertFalse(criteria(_UpperCAmelCase , _UpperCAmelCase ) ) __UpperCamelCase , __UpperCamelCase : List[Any] = self._get_tensors(1_0 ) self.assertTrue(criteria(_UpperCAmelCase , _UpperCAmelCase ) ) __UpperCamelCase : Union[str, Any] = StoppingCriteriaList([criteria] ) self.assertEqual(criteria_list.max_length , 1_0 ) def a_ (self ) -> List[Any]: __UpperCamelCase , __UpperCamelCase : int = self._get_tensors(5 ) __UpperCamelCase : str = MaxTimeCriteria(max_time=0.1 ) self.assertFalse(criteria(_UpperCAmelCase , _UpperCAmelCase ) ) __UpperCamelCase : int = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 ) self.assertTrue(criteria(_UpperCAmelCase , _UpperCAmelCase ) ) def a_ (self ) -> Tuple: validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_0 ) with self.assertWarns(_UpperCAmelCase ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_1 ) __UpperCamelCase : Any = validate_stopping_criteria(StoppingCriteriaList() , 1_1 ) self.assertEqual(len(_UpperCAmelCase ) , 1 )
298
'''simple docstring''' import os from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home _lowerCAmelCase = HUGGINGFACE_HUB_CACHE _lowerCAmelCase = '''config.json''' _lowerCAmelCase = '''diffusion_pytorch_model.bin''' _lowerCAmelCase = '''diffusion_flax_model.msgpack''' _lowerCAmelCase = '''model.onnx''' _lowerCAmelCase = '''diffusion_pytorch_model.safetensors''' _lowerCAmelCase = '''weights.pb''' _lowerCAmelCase = '''https://huggingface.co''' _lowerCAmelCase = default_cache_path _lowerCAmelCase = '''diffusers_modules''' _lowerCAmelCase = os.getenv('''HF_MODULES_CACHE''', os.path.join(hf_cache_home, '''modules''')) _lowerCAmelCase = ['''fp16''', '''non-ema'''] _lowerCAmelCase = '''.self_attn'''
298
1
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class A ( unittest.TestCase ): '''simple docstring''' def a_ (self ) -> int: __UpperCamelCase : Optional[int] = tempfile.mkdtemp() # fmt: off __UpperCamelCase : str = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on __UpperCamelCase : Tuple = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) __UpperCamelCase : str = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""] __UpperCamelCase : Tuple = {"unk_token": "<unk>"} __UpperCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) __UpperCamelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(_UpperCAmelCase ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(_UpperCAmelCase ) ) __UpperCamelCase : Any = { "do_resize": True, "size": 2_0, "do_center_crop": True, "crop_size": 1_8, "do_normalize": True, "image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073], "image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711], } __UpperCamelCase : List[str] = os.path.join(self.tmpdirname , _UpperCAmelCase ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(_UpperCAmelCase , _UpperCAmelCase ) def a_ (self , **_UpperCAmelCase ) -> Any: return 
CLIPTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def a_ (self , **_UpperCAmelCase ) -> List[str]: return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def a_ (self , **_UpperCAmelCase ) -> str: return CLIPImageProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def a_ (self ) -> List[str]: shutil.rmtree(self.tmpdirname ) def a_ (self ) -> Optional[Any]: __UpperCamelCase : List[str] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] __UpperCamelCase : str = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def a_ (self ) -> str: __UpperCamelCase : Dict = self.get_tokenizer() __UpperCamelCase : Optional[int] = self.get_rust_tokenizer() __UpperCamelCase : Optional[Any] = self.get_image_processor() __UpperCamelCase : Any = CLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase ) processor_slow.save_pretrained(self.tmpdirname ) __UpperCamelCase : str = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCAmelCase ) __UpperCamelCase : List[Any] = CLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase ) processor_fast.save_pretrained(self.tmpdirname ) __UpperCamelCase : int = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , _UpperCAmelCase ) self.assertIsInstance(processor_fast.tokenizer , _UpperCAmelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , _UpperCAmelCase ) 
self.assertIsInstance(processor_fast.image_processor , _UpperCAmelCase ) def a_ (self ) -> Optional[Any]: __UpperCamelCase : Optional[int] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __UpperCamelCase : List[Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) __UpperCamelCase : Dict = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 ) __UpperCamelCase : Optional[int] = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=_UpperCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , _UpperCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _UpperCAmelCase ) def a_ (self ) -> Union[str, Any]: __UpperCamelCase : Union[str, Any] = self.get_image_processor() __UpperCamelCase : Dict = self.get_tokenizer() __UpperCamelCase : Any = CLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase ) __UpperCamelCase : Union[str, Any] = self.prepare_image_inputs() __UpperCamelCase : Optional[int] = image_processor(_UpperCAmelCase , return_tensors="np" ) __UpperCamelCase : Tuple = processor(images=_UpperCAmelCase , return_tensors="np" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def a_ (self ) -> List[Any]: __UpperCamelCase : Dict = self.get_image_processor() __UpperCamelCase : Optional[int] = self.get_tokenizer() __UpperCamelCase : Union[str, Any] = CLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = "lower newer" __UpperCamelCase : List[str] = processor(text=_UpperCAmelCase ) __UpperCamelCase : int = 
tokenizer(_UpperCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def a_ (self ) -> Any: __UpperCamelCase : Tuple = self.get_image_processor() __UpperCamelCase : Optional[Any] = self.get_tokenizer() __UpperCamelCase : Optional[int] = CLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase ) __UpperCamelCase : int = "lower newer" __UpperCamelCase : Optional[Any] = self.prepare_image_inputs() __UpperCamelCase : Optional[int] = processor(text=_UpperCAmelCase , images=_UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(_UpperCAmelCase ): processor() def a_ (self ) -> List[Any]: __UpperCamelCase : Any = self.get_image_processor() __UpperCamelCase : List[Any] = self.get_tokenizer() __UpperCamelCase : Tuple = CLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase ) __UpperCamelCase : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __UpperCamelCase : Any = processor.batch_decode(_UpperCAmelCase ) __UpperCamelCase : str = tokenizer.batch_decode(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def a_ (self ) -> Any: __UpperCamelCase : Optional[Any] = self.get_image_processor() __UpperCamelCase : Any = self.get_tokenizer() __UpperCamelCase : str = CLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = "lower newer" __UpperCamelCase : Optional[int] = self.prepare_image_inputs() __UpperCamelCase : int = processor(text=_UpperCAmelCase , images=_UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
298
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class A : '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase=1_3 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=9_9 , _UpperCAmelCase=3_2 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=3_7 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=5_1_2 , _UpperCAmelCase=1_6 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ) -> Dict: __UpperCamelCase : Optional[Any] = parent __UpperCamelCase : List[str] = 1_3 __UpperCamelCase : List[Any] = 7 __UpperCamelCase : List[str] = True __UpperCamelCase : Optional[Any] = True __UpperCamelCase : Tuple = True __UpperCamelCase : str = True __UpperCamelCase : List[Any] = 9_9 __UpperCamelCase : Union[str, Any] = 3_8_4 __UpperCamelCase : str = 2 __UpperCamelCase : Optional[Any] = 4 __UpperCamelCase : Any = 3_7 __UpperCamelCase : str = "gelu" __UpperCamelCase : Optional[Any] = 0.1 __UpperCamelCase : str = 0.1 __UpperCamelCase : str = 5_1_2 __UpperCamelCase : Optional[Any] = 1_6 __UpperCamelCase : Dict = 2 __UpperCamelCase : Optional[int] = 0.02 __UpperCamelCase : List[Any] = 3 __UpperCamelCase : Optional[Any] = 4 __UpperCamelCase : int = 1_2_8 __UpperCamelCase : Tuple = 2 
__UpperCamelCase : str = 9 __UpperCamelCase : List[Any] = 1 __UpperCamelCase : Any = None def a_ (self ) -> int: __UpperCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase : str = None if self.use_input_mask: __UpperCamelCase : str = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCamelCase : int = None if self.use_token_type_ids: __UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCamelCase : List[Any] = None __UpperCamelCase : Union[str, Any] = None __UpperCamelCase : Optional[Any] = None if self.use_labels: __UpperCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCamelCase : Tuple = ids_tensor([self.batch_size] , self.num_choices ) __UpperCamelCase : str = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Dict: __UpperCamelCase : Tuple = TFConvBertModel(config=_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} __UpperCamelCase : Optional[Any] = [input_ids, input_mask] __UpperCamelCase : str = model(_UpperCAmelCase ) 
__UpperCamelCase : int = model(_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]: __UpperCamelCase : int = TFConvBertForMaskedLM(config=_UpperCAmelCase ) __UpperCamelCase : Dict = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } __UpperCamelCase : List[str] = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]: __UpperCamelCase : Union[str, Any] = self.num_labels __UpperCamelCase : Optional[Any] = TFConvBertForSequenceClassification(config=_UpperCAmelCase ) __UpperCamelCase : List[str] = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } __UpperCamelCase : Optional[Any] = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]: __UpperCamelCase : Optional[int] = self.num_choices __UpperCamelCase : List[Any] = TFConvBertForMultipleChoice(config=_UpperCAmelCase ) __UpperCamelCase : Optional[int] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCamelCase : Optional[Any] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCamelCase : str = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCamelCase : List[str] = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": 
multiple_choice_token_type_ids, } __UpperCamelCase : int = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any: __UpperCamelCase : List[str] = self.num_labels __UpperCamelCase : Tuple = TFConvBertForTokenClassification(config=_UpperCAmelCase ) __UpperCamelCase : Dict = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } __UpperCamelCase : Union[str, Any] = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]: __UpperCamelCase : int = TFConvBertForQuestionAnswering(config=_UpperCAmelCase ) __UpperCamelCase : Dict = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } __UpperCamelCase : Any = model(_UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def a_ (self ) -> str: __UpperCamelCase : str = self.prepare_config_and_inputs() ( ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ) : Any = config_and_inputs __UpperCamelCase : int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): '''simple docstring''' A = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, 
TFConvBertForMultipleChoice, ) if is_tf_available() else () ) A = ( { "feature-extraction": TFConvBertModel, "fill-mask": TFConvBertForMaskedLM, "question-answering": TFConvBertForQuestionAnswering, "text-classification": TFConvBertForSequenceClassification, "token-classification": TFConvBertForTokenClassification, "zero-shot": TFConvBertForSequenceClassification, } if is_tf_available() else {} ) A = False A = False A = False def a_ (self ) -> Optional[int]: __UpperCamelCase : Tuple = TFConvBertModelTester(self ) __UpperCamelCase : Optional[Any] = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=3_7 ) def a_ (self ) -> Dict: self.config_tester.run_common_tests() def a_ (self ) -> Dict: __UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def a_ (self ) -> Tuple: __UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase ) def a_ (self ) -> Tuple: __UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase ) def a_ (self ) -> Dict: __UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase ) def a_ (self ) -> Dict: __UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase ) def a_ (self ) -> Optional[int]: __UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase ) @slow def a_ (self ) -> Any: __UpperCamelCase , __UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() __UpperCamelCase : str = True __UpperCamelCase : int = True if hasattr(_UpperCAmelCase , "use_cache" ): __UpperCamelCase : 
List[Any] = True __UpperCamelCase : List[str] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length ) __UpperCamelCase : Optional[Any] = getattr(self.model_tester , "key_length" , _UpperCAmelCase ) for model_class in self.all_model_classes: __UpperCamelCase : Any = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) __UpperCamelCase : int = model_class(_UpperCAmelCase ) __UpperCamelCase : Any = len(model(_UpperCAmelCase ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase ) __UpperCamelCase : List[str] = os.path.join(_UpperCAmelCase , "saved_model" , "1" ) __UpperCamelCase : List[str] = tf.keras.models.load_model(_UpperCAmelCase ) __UpperCamelCase : Dict = model(_UpperCAmelCase ) if self.is_encoder_decoder: __UpperCamelCase : Any = outputs["encoder_hidden_states"] __UpperCamelCase : Tuple = outputs["encoder_attentions"] else: __UpperCamelCase : Tuple = outputs["hidden_states"] __UpperCamelCase : Optional[int] = outputs["attentions"] self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase ) __UpperCamelCase : Any = getattr( self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def a_ (self ) -> Optional[Any]: __UpperCamelCase : Tuple = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) self.assertIsNotNone(_UpperCAmelCase ) def a_ (self ) -> Tuple: __UpperCamelCase , __UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __UpperCamelCase : str = True __UpperCamelCase 
: Tuple = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length ) __UpperCamelCase : Optional[int] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length ) __UpperCamelCase : Any = getattr(self.model_tester , "key_length" , _UpperCAmelCase ) __UpperCamelCase : List[Any] = getattr(self.model_tester , "key_length" , _UpperCAmelCase ) def check_decoder_attentions_output(_UpperCAmelCase ): __UpperCamelCase : Dict = len(_UpperCAmelCase ) self.assertEqual(out_len % 2 , 0 ) __UpperCamelCase : List[str] = outputs.decoder_attentions self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(_UpperCAmelCase ): __UpperCamelCase : Any = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: __UpperCamelCase : Any = True __UpperCamelCase : Dict = False __UpperCamelCase : str = model_class(_UpperCAmelCase ) __UpperCamelCase : Tuple = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) __UpperCamelCase : List[Any] = len(_UpperCAmelCase ) self.assertEqual(config.output_hidden_states , _UpperCAmelCase ) check_encoder_attentions_output(_UpperCAmelCase ) if self.is_encoder_decoder: __UpperCamelCase : str = model_class(_UpperCAmelCase ) __UpperCamelCase : Dict = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertEqual(config.output_hidden_states , _UpperCAmelCase ) check_decoder_attentions_output(_UpperCAmelCase ) # Check that output attentions can also be changed via the 
config del inputs_dict["output_attentions"] __UpperCamelCase : Optional[Any] = True __UpperCamelCase : Tuple = model_class(_UpperCAmelCase ) __UpperCamelCase : int = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertEqual(config.output_hidden_states , _UpperCAmelCase ) check_encoder_attentions_output(_UpperCAmelCase ) # Check attention is always last and order is fine __UpperCamelCase : int = True __UpperCamelCase : str = True __UpperCamelCase : Optional[Any] = model_class(_UpperCAmelCase ) __UpperCamelCase : Optional[int] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase ) ) self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase ) check_encoder_attentions_output(_UpperCAmelCase ) @require_tf class A ( unittest.TestCase ): '''simple docstring''' @slow def a_ (self ) -> str: __UpperCamelCase : Dict = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) __UpperCamelCase : str = tf.constant([[0, 1, 2, 3, 4, 5]] ) __UpperCamelCase : Optional[int] = model(_UpperCAmelCase )[0] __UpperCamelCase : Tuple = [1, 6, 7_6_8] self.assertEqual(output.shape , _UpperCAmelCase ) __UpperCamelCase : Any = tf.constant( [ [ [-0.03_475_493, -0.4_686_034, -0.30_638_832], [0.22_637_248, -0.26_988_646, -0.7_423_424], [0.10_324_868, -0.45_013_508, -0.58_280_784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1E-4 )
298
1
"""Tests for the XLM (BPE) tokenizer."""
import json
import os
import unittest

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        """Write a tiny BPE vocab/merges pair to tmpdir for the common tests."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        """'lower' should BPE-split into 'low' + 'er</w>' with the toy merges."""
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        """Special tokens: <s> (0) prefixes, </s> (1) separates/terminates."""
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
298
"""Convert timm ResNet checkpoints into HuggingFace `ResNetForImageClassification`
weights by tracing both models and copying state dicts operation-by-operation."""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List

import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor

from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger()


@dataclass
class Tracker:
    """Records the leaf modules (convs/batchnorms) hit during a forward pass."""

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # Only keep leaves (no submodules) plus conv/batchnorm layers.
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    """Copies parameters from `src` to `dest` by aligning their traced operations."""

    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """Transfer weights using a forward pass with `x`; both modules must
        perform the same number of (non-skipped) operations."""
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")


def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    """Convert one timm ResNet checkpoint, verify logits match, optionally push."""
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """Convert one named ResNet (or all supported sizes when `model_name` is None)."""
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1_024, 2_048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1_024, 2_048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1_024, 2_048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1_024, 2_048], layer_type="bottleneck"
        ),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
            " currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
298
1
"""Tests for the `text-classification` agent tool (local and remote variants)."""
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        # One local (setup eagerly) and one remote instance of the same tool.
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
298
'''simple docstring''' import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) _lowerCAmelCase = logging.getLogger() def __lowerCAmelCase ( ): __UpperCamelCase : List[str] = argparse.ArgumentParser() parser.add_argument("-f" ) __UpperCamelCase : Any = parser.parse_args() return args.f def __lowerCAmelCase ( snake_case__ ): __UpperCamelCase : Dict = {} __UpperCamelCase : Dict = os.path.join(snake_case__ , "all_results.json" ) if os.path.exists(snake_case__ ): with open(snake_case__ , "r" ) as f: __UpperCamelCase : Any = json.load(snake_case__ ) else: raise ValueError(F"can't find {path}" ) return results def __lowerCAmelCase ( ): __UpperCamelCase : Any = torch.cuda.is_available() and torch_device == "cuda" return is_using_cuda and is_apex_available() _lowerCAmelCase = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' @classmethod def a_ (cls ) -> Union[str, Any]: # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU __UpperCamelCase : Optional[Any] = tempfile.mkdtemp() __UpperCamelCase : List[str] = os.path.join(cls.tmpdir , "default_config.yml" ) write_basic_config(save_location=cls.configPath ) __UpperCamelCase : Optional[Any] = ["accelerate", "launch", "--config_file", cls.configPath] @classmethod def a_ (cls ) -> Union[str, Any]: shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Optional[int]: __UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir() __UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path 
distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n ".split() if is_cuda_and_apex_available(): testargs.append("--fp16" ) run_command(self._launch_args + testargs ) __UpperCamelCase : Tuple = get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "glue_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Dict: __UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir() __UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n ".split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. 
return run_command(self._launch_args + testargs ) __UpperCamelCase : int = get_results(_UpperCAmelCase ) self.assertLess(result["perplexity"] , 1_0_0 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "clm_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Any: __UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir() __UpperCamelCase : Optional[Any] = f"\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n ".split() run_command(self._launch_args + testargs ) __UpperCamelCase : Optional[Any] = get_results(_UpperCAmelCase ) self.assertLess(result["perplexity"] , 4_2 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "mlm_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> int: # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu __UpperCamelCase : int = 7 if get_gpu_count() > 1 else 2 __UpperCamelCase : int = self.get_auto_remove_tmp_dir() __UpperCamelCase : str = f"\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n ".split() run_command(self._launch_args + testargs ) __UpperCamelCase : List[Any] = 
get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) self.assertLess(result["train_loss"] , 0.5 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "ner_no_trainer" ) ) ) @unittest.skip(reason="Fix me @muellerzr" ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Any: __UpperCamelCase : Tuple = self.get_auto_remove_tmp_dir() __UpperCamelCase : str = f"\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split() run_command(self._launch_args + testargs ) __UpperCamelCase : Optional[int] = get_results(_UpperCAmelCase ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. 
self.assertGreaterEqual(result["eval_f1"] , 2_8 ) self.assertGreaterEqual(result["eval_exact"] , 2_8 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "qa_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Dict: __UpperCamelCase : Tuple = self.get_auto_remove_tmp_dir() __UpperCamelCase : List[str] = f"\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n ".split() run_command(self._launch_args + testargs ) __UpperCamelCase : Tuple = get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.8 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "swag_no_trainer" ) ) ) @slow @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Union[str, Any]: __UpperCamelCase : str = self.get_auto_remove_tmp_dir() __UpperCamelCase : Dict = f"\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split() run_command(self._launch_args + testargs ) __UpperCamelCase : Dict = get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_rouge1"] , 1_0 ) self.assertGreaterEqual(result["eval_rouge2"] , 2 ) self.assertGreaterEqual(result["eval_rougeL"] 
, 7 ) self.assertGreaterEqual(result["eval_rougeLsum"] , 7 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "summarization_no_trainer" ) ) ) @slow @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Tuple: __UpperCamelCase : Optional[int] = self.get_auto_remove_tmp_dir() __UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n ".split() run_command(self._launch_args + testargs ) __UpperCamelCase : List[Any] = get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_bleu"] , 3_0 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "translation_no_trainer" ) ) ) @slow def a_ (self ) -> List[Any]: __UpperCamelCase : Tuple = logging.StreamHandler(sys.stdout ) logger.addHandler(_UpperCAmelCase ) __UpperCamelCase : Dict = self.get_auto_remove_tmp_dir() __UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n ".split() run_command(self._launch_args + testargs ) __UpperCamelCase : Optional[int] = 
get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.10 ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Tuple: __UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir() __UpperCamelCase : Optional[Any] = f"\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n ".split() if is_cuda_and_apex_available(): testargs.append("--fp16" ) run_command(self._launch_args + testargs ) __UpperCamelCase : str = get_results(_UpperCAmelCase ) # The base model scores a 25% self.assertGreaterEqual(result["eval_accuracy"] , 0.6 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "step_1" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "image_classification_no_trainer" ) ) )
298
1
"""CLIPSeg-style processor wrapping a ViT image processor and a CLIP tokenizer.

Reconstructed from machine-mangled source in which every local variable shared
one placeholder name; behaviour is restored from the visible control flow.
"""
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class A(ProcessorMixin):
    """Combine a ViT image processor and a CLIP tokenizer into one callable.

    Accepts ``text`` and/or ``visual_prompt`` and/or ``images`` and returns a
    single ``BatchEncoding``-like object with the tokenizer/image-processor
    outputs merged.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            # Backward compatibility: `feature_extractor` was renamed to
            # `image_processor` and is scheduled for removal.
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        """Prepare model inputs.

        Exactly one of ``text`` / ``visual_prompt`` may be given; ``images``
        may accompany either. Raises ValueError when nothing is provided or
        when both prompt kinds are provided at once.
        """
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            # Only images were given: wrap the raw image-processor output.
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        # Forwarded to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forwarded to the tokenizer's decode.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
298
"""Twin-prime helper built on the project's prime check."""
from maths.prime_check import is_prime


def twin_prime(number: int) -> int:
    """Return the twin prime of ``number`` if one exists.

    A twin prime of ``number`` is ``number + 2`` when both ``number`` and
    ``number + 2`` are prime.

    Args:
        number: candidate lower member of a twin-prime pair.

    Returns:
        ``number + 2`` if both are prime, otherwise ``-1``.

    Raises:
        TypeError: if ``number`` is not an integer.
    """
    # Bug fix: the mangled original called isinstance(number, number), which
    # raises "isinstance() arg 2 must be a type" for every call; the intended
    # check is against `int`.
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
298
1
"""Convert a roberta-prelayernorm checkpoint from the Hub to Transformers format."""
import argparse

import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str) -> None:
    """Download a reference checkpoint and re-save it as model + tokenizer.

    Args:
        checkpoint_repo: Hub repo id of the original checkpoint
            (e.g. ``andreasmadsen/efficient_mlm_m0.40``).
        pytorch_dump_folder_path: directory the converted model and tokenizer
            are written to.
    """
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # Convert the state dict: the reference implementation names its backbone
    # "roberta", ours uses the unique prefix "roberta_prelayernorm".
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used;
        # drop them from the state dict.
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    # NOTE(review): the mangled source obscured the first argument here; the
    # upstream conversion script instantiates from config + state_dict only
    # (no pretrained path) — confirm against the original script.
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # Convert (i.e. re-save) the tokenizer alongside the model.
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
298
"""Hubble parameter H(z) from the Friedmann equation."""


def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Compute the Hubble parameter at a given redshift.

    Bug fix: the mangled original declared all five parameters with the same
    name (a SyntaxError); the real parameter names are restored from the body
    and the demo call below.

    Args:
        hubble_constant: H0, the present-day Hubble constant.
        radiation_density: relative radiation density (0..1).
        matter_density: relative matter density (0..1).
        dark_energy: relative dark-energy density (0..1).
        redshift: redshift z >= 0.

    Returns:
        H(z) = H0 * sqrt(Or*(1+z)^4 + Om*(1+z)^3 + Ok*(1+z)^2 + OL).

    Raises:
        ValueError: if any input is negative or a relative density exceeds 1.
    """
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    # Only the three relative densities are bounded by 1; redshift is not.
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")

    # Curvature density follows from the closure relation: sum of all
    # densities equals one.
    curvature = 1 - (matter_density + radiation_density + dark_energy)
    e_2 = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    return hubble_constant * e_2 ** (1 / 2)


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
298
1
"""Probable-prime generation via trial division and Miller-Rabin."""
import random


def rabin_miller(num: int) -> bool:
    """Probabilistic Miller-Rabin primality test with 5 random rounds.

    Returns True when ``num`` is (very probably) prime; composites pass with
    probability at most 4**-5 per the standard error bound.
    """
    # Factor num - 1 as s * 2**t with s odd.
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    # Never reached -1 mod num: `a` witnesses compositeness.
                    return False
                i = i + 1
                v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    """Primality test with cheap deterministic pre-checks.

    Small primes are answered from a table, multiples of them are rejected by
    trial division, and everything else falls back to ``rabin_miller``.
    """
    if num < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61,
        67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137,
        139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211,
        223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283,
        293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379,
        383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461,
        463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563,
        569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643,
        647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739,
        743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829,
        839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
        941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)


def generate_large_prime(keysize: int = 1_024) -> int:
    """Return a random probable prime with ``keysize`` bits."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
298
"""Keep the auto-generated model lists in the task guides in sync."""
import argparse
import os

from transformers.utils import direct_transformers_import

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"


def _find_text_in_file(filename: str, start_prompt: str, end_prompt: str):
    """Extract the text between two marker lines of a file.

    Returns a 4-tuple ``(text, start_index, end_index, lines)`` where ``text``
    is the joined content strictly between the prompts (near-empty boundary
    lines trimmed), the indices delimit that span, and ``lines`` is the full
    file content for rewriting.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    # Trim blank-ish lines at both ends of the extracted span.
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
    "asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    "audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    "language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    "image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    "masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    "multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    "object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    "question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    "semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    "sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    "summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    "translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    "document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    "monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}

# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
    "summarization.md": ("nllb",),
    "translation.md": ("nllb",),
}


def get_model_list_for_task(task_guide: str) -> str:
    """Render the markdown list of model links supported by ``task_guide``."""
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide: str, overwrite: bool = False) -> None:
    """Verify (and optionally rewrite) the generated model list of one guide.

    Raises ValueError when the list is stale and ``overwrite`` is False.
    """
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )

    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
298
1
'''simple docstring''' from collections.abc import Iterable from typing import Any class A : '''simple docstring''' def __init__(self , _UpperCAmelCase = None ) -> int: __UpperCamelCase : List[Any] = value __UpperCamelCase : Node | None = None # Added in order to delete a node easier __UpperCamelCase : Node | None = None __UpperCamelCase : Node | None = None def __repr__(self ) -> str: from pprint import pformat if self.left is None and self.right is None: return str(self.value ) return pformat({f"{self.value}": (self.left, self.right)} , indent=1 ) class A : '''simple docstring''' def __init__(self , _UpperCAmelCase = None ) -> Union[str, Any]: __UpperCamelCase : List[Any] = root def __str__(self ) -> str: return str(self.root ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase ) -> None: if new_children is not None: # reset its kids __UpperCamelCase : Union[str, Any] = node.parent if node.parent is not None: # reset its parent if self.is_right(_UpperCAmelCase ): # If it is the right children __UpperCamelCase : Union[str, Any] = new_children else: __UpperCamelCase : Optional[Any] = new_children else: __UpperCamelCase : List[str] = new_children def a_ (self , _UpperCAmelCase ) -> bool: if node.parent and node.parent.right: return node == node.parent.right return False def a_ (self ) -> bool: return self.root is None def a_ (self , _UpperCAmelCase ) -> None: __UpperCamelCase : List[Any] = Node(_UpperCAmelCase ) # create a new Node if self.empty(): # if Tree is empty __UpperCamelCase : List[str] = new_node # set its root else: # Tree is not empty __UpperCamelCase : Dict = self.root # from root if parent_node is None: return while True: # While we don't get to a leaf if value < parent_node.value: # We go left if parent_node.left is None: __UpperCamelCase : List[str] = new_node # We insert the new node in a leaf break else: __UpperCamelCase : Dict = parent_node.left else: if parent_node.right is None: __UpperCamelCase : List[str] = new_node break else: __UpperCamelCase 
: Optional[Any] = parent_node.right __UpperCamelCase : List[Any] = parent_node def a_ (self , *_UpperCAmelCase ) -> None: for value in values: self.__insert(_UpperCAmelCase ) def a_ (self , _UpperCAmelCase ) -> Node | None: if self.empty(): raise IndexError("Warning: Tree is empty! please use another." ) else: __UpperCamelCase : Union[str, Any] = self.root # use lazy evaluation here to avoid NoneType Attribute error while node is not None and node.value is not value: __UpperCamelCase : Union[str, Any] = node.left if value < node.value else node.right return node def a_ (self , _UpperCAmelCase = None ) -> Node | None: if node is None: if self.root is None: return None __UpperCamelCase : Union[str, Any] = self.root if not self.empty(): while node.right is not None: __UpperCamelCase : int = node.right return node def a_ (self , _UpperCAmelCase = None ) -> Node | None: if node is None: __UpperCamelCase : Optional[int] = self.root if self.root is None: return None if not self.empty(): __UpperCamelCase : Union[str, Any] = self.root while node.left is not None: __UpperCamelCase : List[Any] = node.left return node def a_ (self , _UpperCAmelCase ) -> None: __UpperCamelCase : Optional[Any] = self.search(_UpperCAmelCase ) # Look for the node with that label if node is not None: if node.left is None and node.right is None: # If it has no children self.__reassign_nodes(_UpperCAmelCase , _UpperCAmelCase ) elif node.left is None: # Has only right children self.__reassign_nodes(_UpperCAmelCase , node.right ) elif node.right is None: # Has only left children self.__reassign_nodes(_UpperCAmelCase , node.left ) else: __UpperCamelCase : Any = self.get_max( node.left ) # Gets the max value of the left branch self.remove(tmp_node.value ) # type: ignore __UpperCamelCase : Tuple = ( tmp_node.value # type: ignore ) # Assigns the value to the node to delete and keep tree structure def a_ (self , _UpperCAmelCase ) -> Iterable: if node is not None: yield node # Preorder Traversal yield from 
self.preorder_traverse(node.left ) yield from self.preorder_traverse(node.right ) def a_ (self , _UpperCAmelCase=None ) -> Any: if traversal_function is None: return self.preorder_traverse(self.root ) else: return traversal_function(self.root ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase ) -> None: if node: self.inorder(_UpperCAmelCase , node.left ) arr.append(node.value ) self.inorder(_UpperCAmelCase , node.right ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase ) -> int: __UpperCamelCase : list[int] = [] self.inorder(_UpperCAmelCase , _UpperCAmelCase ) # append all values to list using inorder traversal return arr[k - 1] def __lowerCAmelCase ( snake_case__ ): __UpperCamelCase : str = [] if curr_node is not None: __UpperCamelCase : Optional[int] = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node] return node_list def __lowerCAmelCase ( ): __UpperCamelCase : str = (8, 3, 6, 1, 10, 14, 13, 4, 7) __UpperCamelCase : Union[str, Any] = BinarySearchTree() for i in testlist: t.insert(snake_case__ ) # Prints all the elements of the list in order traversal print(snake_case__ ) if t.search(6 ) is not None: print("The value 6 exists" ) else: print("The value 6 doesn't exist" ) if t.search(-1 ) is not None: print("The value -1 exists" ) else: print("The value -1 doesn't exist" ) if not t.empty(): print("Max Value: " , t.get_max().value ) # type: ignore print("Min Value: " , t.get_min().value ) # type: ignore for i in testlist: t.remove(snake_case__ ) print(snake_case__ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
298
"""OwlViT processor wrapping an OwlViT image processor and a CLIP tokenizer."""
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class A(ProcessorMixin):
    """Combine OwlViT image preprocessing and CLIP tokenization.

    Text queries are padded per batch sample to the maximum number of queries
    so that every sample yields the same number of token sequences.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            # Backward compatibility for the pre-v5 `feature_extractor` name.
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        """Tokenize text queries and/or preprocess (query) images.

        Raises:
            ValueError: when none of ``text``/``query_images``/``images`` is
                given, or ``return_tensors`` is unsupported.
            TypeError: when ``text`` is neither a string, a list of strings,
                nor a nested list of strings.
        """
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            # Stitch per-sample encodings into one batch, per tensor backend.
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        # Forwarded to OwlViTImageProcessor.post_process.
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        # Forwarded to OwlViTImageProcessor.post_process_object_detection.
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        # Forwarded to OwlViTImageProcessor.post_process_image_guided_detection.
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        # Forwarded to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forwarded to the tokenizer's decode.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
298
1
"""Min-max normalization and z-score standardization of numeric data."""
from statistics import mean, stdev


def normalization(data: list, ndigits: int = 3) -> list:
    """Min-max scale ``data`` into [0, 1], rounded to ``ndigits`` places.

    Bug fix: the mangled original defined both functions under the same name,
    so this one was shadowed; the distinct names are restored.
    """
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Z-score standardize ``data`` (mean 0, stdev 1), rounded to ``ndigits``."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / sigma, ndigits) for x in data]
298
"""Base16 (uppercase hex) encoding and decoding per RFC 3548."""


def base16_encode(data: bytes) -> str:
    """Encode ``data`` as an uppercase Base16 string.

    Bug fix: the mangled original defined encode and decode under the same
    name, so encoding was unreachable; distinct names are restored.
    """
    # Turn each byte into its two-digit uppercase hex representation.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase Base16 string back into bytes.

    Raises:
        ValueError: if the input has odd length or contains characters
            outside the uppercase Base16 alphabet (RFC 3548 section 6).
    """
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
298
1
'''simple docstring''' import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class A : '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase=1_3 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=9_9 , _UpperCAmelCase=1_6 , _UpperCAmelCase=3_6 , _UpperCAmelCase=6 , _UpperCAmelCase=6 , _UpperCAmelCase=6 , _UpperCAmelCase=3_7 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=5_1_2 , _UpperCAmelCase=1_6 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ) -> Optional[Any]: __UpperCamelCase : int = parent __UpperCamelCase : str = batch_size __UpperCamelCase : Any = seq_length __UpperCamelCase : Union[str, Any] = is_training __UpperCamelCase : List[Any] = use_input_mask __UpperCamelCase : Union[str, Any] = use_token_type_ids __UpperCamelCase : Optional[int] = use_labels __UpperCamelCase : Optional[int] = vocab_size __UpperCamelCase : int = embedding_size __UpperCamelCase : Optional[Any] = hidden_size __UpperCamelCase : int = num_hidden_layers __UpperCamelCase : Union[str, Any] = num_hidden_groups __UpperCamelCase : List[str] = num_attention_heads __UpperCamelCase : str = intermediate_size 
__UpperCamelCase : Tuple = hidden_act __UpperCamelCase : List[str] = hidden_dropout_prob __UpperCamelCase : Any = attention_probs_dropout_prob __UpperCamelCase : str = max_position_embeddings __UpperCamelCase : str = type_vocab_size __UpperCamelCase : Union[str, Any] = type_sequence_label_size __UpperCamelCase : int = initializer_range __UpperCamelCase : Optional[int] = num_labels __UpperCamelCase : Union[str, Any] = num_choices __UpperCamelCase : Union[str, Any] = scope def a_ (self ) -> List[str]: __UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase : str = None if self.use_input_mask: __UpperCamelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCamelCase : Dict = None if self.use_token_type_ids: __UpperCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCamelCase : str = None __UpperCamelCase : Dict = None __UpperCamelCase : int = None if self.use_labels: __UpperCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCamelCase : int = ids_tensor([self.batch_size] , self.num_choices ) __UpperCamelCase : str = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def a_ (self ) -> Optional[Any]: return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def a_ 
(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple: __UpperCamelCase : Dict = AlbertModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __UpperCamelCase : Any = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase ) __UpperCamelCase : Union[str, Any] = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase ) __UpperCamelCase : int = model(_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]: __UpperCamelCase : str = AlbertForPreTraining(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __UpperCamelCase : Dict = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , sentence_order_label=_UpperCAmelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]: __UpperCamelCase : Optional[int] = AlbertForMaskedLM(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __UpperCamelCase : Tuple = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase 
, _UpperCAmelCase ) -> List[Any]: __UpperCamelCase : Tuple = AlbertForQuestionAnswering(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __UpperCamelCase : List[Any] = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> int: __UpperCamelCase : Union[str, Any] = self.num_labels __UpperCamelCase : Union[str, Any] = AlbertForSequenceClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __UpperCamelCase : int = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]: __UpperCamelCase : Dict = self.num_labels __UpperCamelCase : Optional[Any] = AlbertForTokenClassification(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __UpperCamelCase : Tuple = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Dict: __UpperCamelCase : Union[str, Any] = self.num_choices __UpperCamelCase : List[str] = AlbertForMultipleChoice(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __UpperCamelCase : str = 
input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCamelCase : str = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCamelCase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCamelCase : Optional[int] = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def a_ (self ) -> str: __UpperCamelCase : List[Any] = self.prepare_config_and_inputs() ( ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ) : List[str] = config_and_inputs __UpperCamelCase : Dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): '''simple docstring''' A = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) A = ( { "feature-extraction": AlbertModel, "fill-mask": AlbertForMaskedLM, "question-answering": AlbertForQuestionAnswering, "text-classification": AlbertForSequenceClassification, "token-classification": AlbertForTokenClassification, "zero-shot": AlbertForSequenceClassification, } if is_torch_available() else {} ) A = True def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ) -> List[str]: __UpperCamelCase : List[Any] = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase ) if return_labels: if model_class in get_values(_UpperCAmelCase ): __UpperCamelCase : Tuple = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , 
dtype=torch.long , device=_UpperCAmelCase ) __UpperCamelCase : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase ) return inputs_dict def a_ (self ) -> List[Any]: __UpperCamelCase : Optional[int] = AlbertModelTester(self ) __UpperCamelCase : Any = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=3_7 ) def a_ (self ) -> str: self.config_tester.run_common_tests() def a_ (self ) -> List[str]: __UpperCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def a_ (self ) -> int: __UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_UpperCAmelCase ) def a_ (self ) -> Dict: __UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase ) def a_ (self ) -> Union[str, Any]: __UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase ) def a_ (self ) -> Union[str, Any]: __UpperCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase ) def a_ (self ) -> Optional[int]: __UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase ) def a_ (self ) -> Union[str, Any]: __UpperCamelCase : int = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __UpperCamelCase : int = type self.model_tester.create_and_check_model(*_UpperCAmelCase ) @slow def a_ (self ) -> Any: for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCamelCase : Dict = AlbertModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) @require_torch class A ( unittest.TestCase ): 
'''simple docstring''' @slow def a_ (self ) -> str: __UpperCamelCase : Union[str, Any] = AlbertModel.from_pretrained("albert-base-v2" ) __UpperCamelCase : Optional[int] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) __UpperCamelCase : str = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __UpperCamelCase : Optional[Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0] __UpperCamelCase : Any = torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , _UpperCAmelCase ) __UpperCamelCase : List[str] = torch.tensor( [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1E-4 ) )
298
'''simple docstring''' import argparse import json import logging import os import sys from unittest.mock import patch from transformers.testing_utils import TestCasePlus, get_gpu_count, slow _lowerCAmelCase = [ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ '''text-classification''', '''language-modeling''', '''summarization''', '''token-classification''', '''question-answering''', ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm_flax import run_flax_glue import run_flax_ner import run_mlm_flax import run_qa import run_summarization_flax import run_ta_mlm_flax logging.basicConfig(level=logging.DEBUG) _lowerCAmelCase = logging.getLogger() def __lowerCAmelCase ( ): __UpperCamelCase : List[Any] = argparse.ArgumentParser() parser.add_argument("-f" ) __UpperCamelCase : Optional[Any] = parser.parse_args() return args.f def __lowerCAmelCase ( snake_case__ , snake_case__="eval" ): __UpperCamelCase : List[str] = os.path.join(snake_case__ , F"{split}_results.json" ) if os.path.exists(snake_case__ ): with open(snake_case__ , "r" ) as f: return json.load(snake_case__ ) raise ValueError(F"can't find {path}" ) _lowerCAmelCase = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def a_ (self ) -> str: __UpperCamelCase : Any = self.get_auto_remove_tmp_dir() __UpperCamelCase : List[str] = f"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split() with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ): run_flax_glue.main() __UpperCamelCase : int = get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) @slow 
def a_ (self ) -> Tuple: __UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir() __UpperCamelCase : Any = f"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split() with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ): run_clm_flax.main() __UpperCamelCase : Optional[int] = get_results(_UpperCAmelCase ) self.assertLess(result["eval_perplexity"] , 1_0_0 ) @slow def a_ (self ) -> str: __UpperCamelCase : Any = self.get_auto_remove_tmp_dir() __UpperCamelCase : Tuple = f"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split() with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ): run_summarization_flax.main() __UpperCamelCase : Tuple = get_results(_UpperCAmelCase , split="test" ) self.assertGreaterEqual(result["test_rouge1"] , 1_0 ) self.assertGreaterEqual(result["test_rouge2"] , 2 ) self.assertGreaterEqual(result["test_rougeL"] , 7 ) self.assertGreaterEqual(result["test_rougeLsum"] , 7 ) @slow def a_ (self ) -> int: __UpperCamelCase : int = self.get_auto_remove_tmp_dir() __UpperCamelCase : str = f"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n 
--overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split() with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ): run_mlm_flax.main() __UpperCamelCase : Optional[Any] = get_results(_UpperCAmelCase ) self.assertLess(result["eval_perplexity"] , 4_2 ) @slow def a_ (self ) -> Dict: __UpperCamelCase : Dict = self.get_auto_remove_tmp_dir() __UpperCamelCase : Tuple = f"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split() with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ): run_ta_mlm_flax.main() __UpperCamelCase : Tuple = get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.42 ) @slow def a_ (self ) -> Union[str, Any]: # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu __UpperCamelCase : Union[str, Any] = 7 if get_gpu_count() > 1 else 2 __UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir() __UpperCamelCase : Optional[Any] = f"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split() with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ): run_flax_ner.main() __UpperCamelCase : int = 
get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) self.assertGreaterEqual(result["eval_f1"] , 0.3 ) @slow def a_ (self ) -> List[Any]: __UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir() __UpperCamelCase : Dict = f"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split() with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ): run_qa.main() __UpperCamelCase : List[Any] = get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_f1"] , 3_0 ) self.assertGreaterEqual(result["eval_exact"] , 3_0 )
298
1
'''simple docstring''' import inspect import os import unittest from dataclasses import dataclass import torch from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs from accelerate.state import AcceleratorState from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu from accelerate.utils import KwargsHandler @dataclass class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' A = 0 A = False A = 3.0 class A ( unittest.TestCase ): '''simple docstring''' def a_ (self ) -> Optional[Any]: # If no defaults are changed, `to_kwargs` returns an empty dict. self.assertDictEqual(MockClass().to_kwargs() , {} ) self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"a": 2} ) self.assertDictEqual(MockClass(a=2 , b=_UpperCAmelCase ).to_kwargs() , {"a": 2, "b": True} ) self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"a": 2, "c": 2.25} ) @require_cuda def a_ (self ) -> Optional[Any]: # If no defaults are changed, `to_kwargs` returns an empty dict. 
__UpperCamelCase : Optional[int] = GradScalerKwargs(init_scale=1_0_2_4 , growth_factor=2 ) AcceleratorState._reset_state() __UpperCamelCase : int = Accelerator(mixed_precision="fp16" , kwargs_handlers=[scaler_handler] ) print(accelerator.use_fpaa ) __UpperCamelCase : Tuple = accelerator.scaler # Check the kwargs have been applied self.assertEqual(scaler._init_scale , 1_024.0 ) self.assertEqual(scaler._growth_factor , 2.0 ) # Check the other values are at the default self.assertEqual(scaler._backoff_factor , 0.5 ) self.assertEqual(scaler._growth_interval , 2_0_0_0 ) self.assertEqual(scaler._enabled , _UpperCAmelCase ) @require_multi_gpu def a_ (self ) -> Optional[int]: __UpperCamelCase : Union[str, Any] = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )] execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() ) if __name__ == "__main__": _lowerCAmelCase = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True) _lowerCAmelCase = Accelerator(kwargs_handlers=[ddp_scaler]) _lowerCAmelCase = torch.nn.Linear(100, 200) _lowerCAmelCase = accelerator.prepare(model) # Check the values changed in kwargs _lowerCAmelCase = '''''' _lowerCAmelCase = model.bucket_bytes_cap // (1024 * 1024) if observed_bucket_cap_map != 15: error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n" if model.find_unused_parameters is not True: error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n" # Check the values of the defaults if model.dim != 0: error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n" if model.broadcast_buffers is not True: error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n" if model.gradient_as_bucket_view is not False: error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n" # Raise error at 
the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
298
'''simple docstring''' import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class A : '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase=9_9 , _UpperCAmelCase=1_3 , _UpperCAmelCase=1_6 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=2 , _UpperCAmelCase=3_2 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase=3_0 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=None , ) -> int: __UpperCamelCase : List[str] = parent __UpperCamelCase : str = batch_size __UpperCamelCase : str = decoder_seq_length # For common tests __UpperCamelCase : Optional[int] = self.decoder_seq_length __UpperCamelCase : Any = is_training __UpperCamelCase : Tuple = use_attention_mask __UpperCamelCase : Optional[int] = use_labels __UpperCamelCase : Dict = vocab_size __UpperCamelCase : Optional[int] = d_model __UpperCamelCase : Union[str, Any] = d_model __UpperCamelCase : int = decoder_layers __UpperCamelCase : Dict = decoder_layers __UpperCamelCase : str = decoder_ffn_dim __UpperCamelCase : Optional[Any] = decoder_attention_heads __UpperCamelCase : Optional[Any] = decoder_attention_heads __UpperCamelCase : List[Any] = eos_token_id __UpperCamelCase : int = bos_token_id __UpperCamelCase : Tuple = pad_token_id __UpperCamelCase : Tuple = decoder_start_token_id __UpperCamelCase : Dict = use_cache __UpperCamelCase : Optional[Any] = max_position_embeddings __UpperCamelCase : int = None 
__UpperCamelCase : Optional[int] = decoder_seq_length __UpperCamelCase : Optional[int] = 2 __UpperCamelCase : Optional[int] = 1 def a_ (self ) -> List[Any]: __UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) __UpperCamelCase : int = None if self.use_attention_mask: __UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 ) __UpperCamelCase : List[str] = None if self.use_labels: __UpperCamelCase : int = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) __UpperCamelCase : Optional[Any] = TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> Optional[Any]: __UpperCamelCase : List[Any] = True __UpperCamelCase : Optional[Any] = TrOCRDecoder(config=_UpperCAmelCase ).to(_UpperCAmelCase ).eval() __UpperCamelCase : Optional[Any] = input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass __UpperCamelCase : str = model(_UpperCAmelCase , use_cache=_UpperCAmelCase ) __UpperCamelCase : List[Any] = model(_UpperCAmelCase ) __UpperCamelCase : Optional[int] = model(_UpperCAmelCase , use_cache=_UpperCAmelCase ) self.parent.assertTrue(len(_UpperCAmelCase ) == len(_UpperCAmelCase ) ) self.parent.assertTrue(len(_UpperCAmelCase ) == len(_UpperCAmelCase ) + 1 ) __UpperCamelCase : List[Any] = outputs["past_key_values"] # create hypothetical next token and extent to next_input_ids __UpperCamelCase : Optional[int] = ids_tensor((2, 1) , 
config.vocab_size - 1 ) + 1 # append to next input_ids and __UpperCamelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 ) __UpperCamelCase : Tuple = model(_UpperCAmelCase )["last_hidden_state"] __UpperCamelCase : Any = model(_UpperCAmelCase , past_key_values=_UpperCAmelCase )["last_hidden_state"] # select random slice __UpperCamelCase : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item() __UpperCamelCase : Dict = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() __UpperCamelCase : Optional[int] = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 ) def a_ (self ) -> Optional[Any]: __UpperCamelCase : List[str] = self.prepare_config_and_inputs() __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Any = config_and_inputs __UpperCamelCase : str = {"input_ids": input_ids, "attention_mask": attention_mask} return config, inputs_dict @require_torch class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): '''simple docstring''' A = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () A = (TrOCRForCausalLM,) if is_torch_available() else () A = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {} A = True A = False def a_ (self ) -> List[str]: __UpperCamelCase : Optional[int] = TrOCRStandaloneDecoderModelTester(self , is_training=_UpperCAmelCase ) __UpperCamelCase : Dict = ConfigTester(self , config_class=_UpperCAmelCase ) def a_ (self ) -> Dict: pass def a_ (self ) -> Optional[int]: pass def a_ (self ) -> Optional[Any]: pass def a_ (self ) -> Dict: self.config_tester.run_common_tests() def a_ (self ) -> List[Any]: __UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*_UpperCAmelCase ) def a_ (self ) -> Any: return @unittest.skip("The model 
doesn't support left padding" ) # and it's not used enough to be worth fixing :) def a_ (self ) -> Tuple: pass
298
1
'''simple docstring''' from __future__ import annotations def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ): __UpperCamelCase : List[str] = [] __UpperCamelCase , __UpperCamelCase : Union[str, Any] = input_list[low:mid], input_list[mid : high + 1] while left and right: result.append((left if left[0] <= right[0] else right).pop(0 ) ) __UpperCamelCase : List[str] = result + left + right return input_list def __lowerCAmelCase ( snake_case__ ): if len(snake_case__ ) <= 1: return input_list __UpperCamelCase : Optional[int] = list(snake_case__ ) # iteration for two-way merging __UpperCamelCase : Dict = 2 while p <= len(snake_case__ ): # getting low, high and middle value for merge-sort of single list for i in range(0 , len(snake_case__ ) , snake_case__ ): __UpperCamelCase : str = i __UpperCamelCase : Optional[Any] = i + p - 1 __UpperCamelCase : Dict = (low + high + 1) // 2 __UpperCamelCase : Dict = merge(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # final merge of last two parts if p * 2 >= len(snake_case__ ): __UpperCamelCase : Union[str, Any] = i __UpperCamelCase : List[Any] = merge(snake_case__ , 0 , snake_case__ , len(snake_case__ ) - 1 ) break p *= 2 return input_list if __name__ == "__main__": _lowerCAmelCase = input('''Enter numbers separated by a comma:\n''').strip() if user_input == "": _lowerCAmelCase = [] else: _lowerCAmelCase = [int(item.strip()) for item in user_input.split(''',''')] print(iter_merge_sort(unsorted))
298
"""Convert a fairseq X-MOD checkpoint into the Hugging Face Transformers format."""

import argparse
from pathlib import Path

import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version

from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging


if version.parse(fairseq.__version__) < version.parse("0.12.2"):
    raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
    raise Exception("requires fairseq < v2")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"


def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    """Load a fairseq X-MOD checkpoint, copy its weights into an HF Xmod model,
    verify both models produce the same outputs, and save the HF model.

    Args:
        xmod_checkpoint_path: Path to the official fairseq ``.pt`` checkpoint.
        pytorch_dump_folder_path: Output directory for the converted model.
        classification_head: Whether to also convert the "mnli" classification head.

    Raises:
        AssertionError: If any weight-tensor shapes disagree between the models.
        Exception: If the converted model's outputs do not match the original's.
    """
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.

    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias

        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")

        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias

        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xmod_checkpoint_to_pytorch(
        args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
298
1
"""Tests for the Hugging Face LeViT model: config, modeling, and slow integration."""

import inspect
import unittest
import warnings
from math import ceil, floor

from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
        MODEL_MAPPING,
        LevitForImageClassification,
        LevitForImageClassificationWithTeacher,
        LevitModel,
    )
    from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import LevitImageProcessor


class LevitConfigTester(ConfigTester):
    # Adds LeViT-specific attribute checks on top of the common config tests.
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


class LevitModelTester:
    """Builds tiny LeViT configs and random inputs for the common model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        # Two subsampling stages between the three LeViT blocks.
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        # The patch embedding applies 4 stride-2 convolutions.
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Runs the common model tests on LeViT.

    Attention-related tests are disabled because LeViT does not expose
    attention outputs.
    """

    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            # The teacher variant is inference-only and takes no labels.
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the COCO cats fixture image used by the integration test."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
298
"""Generate the "wave" of a string: one variant per alphabetic character,
with that single character upper-cased."""


def wave(txt: str) -> list:
    """Return all copies of ``txt`` with exactly one letter capitalized.

    Non-alphabetic characters are never capitalized and produce no variant.

    >>> wave("cat")
    ['Cat', 'cAt', 'caT']
    >>> wave("a b")
    ['A b', 'a B']
    >>> wave("")
    []
    """
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]


if __name__ == "__main__":
    __import__("doctest").testmod()
298
1
"""Tests for the Apache Beam based dataset builders in ``datasets``."""

import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import datasets
import datasets.config

from .utils import require_beam


class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Minimal Beam builder producing flat string examples."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            # No explicit input/target split for these dummy examples.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Minimal Beam builder producing nested (sequence) examples."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    """Return (key, example) pairs for the flat dummy dataset."""
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    """Return (key, example) pairs for the nested dummy dataset."""
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            # Check that BOTH shards were written (the original test checked
            # shard 00000 twice and never verified 00001).
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            # Without a beam runner, preparation must fail loudly.
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
298
"""Combination Sum IV: count ordered ways to reach ``target`` by summing
elements of ``array`` (elements may be reused; different orders count
separately). Three implementations: naive recursion, memoized recursion,
and bottom-up DP."""


def combination_sum_iv(n: int, array: list, target: int) -> int:
    """Count compositions of ``target`` via naive recursion.

    ``n`` (the length of ``array``) is kept for signature parity with the
    bottom-up version. Exponential time; fine for small targets.

    >>> combination_sum_iv(3, [1, 2, 5], 5)
    9
    """

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list, target: int) -> int:
    """Count compositions of ``target`` with top-down memoization.

    >>> combination_sum_iv_dp_array(3, [1, 2, 5], 5)
    9
    """

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:  # already computed
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array) for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list, target: int) -> int:
    """Count compositions of ``target`` with bottom-up DP in O(n * target).

    >>> combination_sum_iv_bottom_up(3, [1, 2, 5], 5)
    9
    """
    dp_array = [0] * (target + 1)
    dp_array[0] = 1  # one way to make 0: the empty sum

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
298
1
"""Tests for the Hugging Face ESMFold protein-folding model."""

import unittest

from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers.models.esm.modeling_esmfold import EsmForProteinFolding


class EsmFoldModelTester:
    """Builds tiny ESMFold configs and random inputs for the common model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        # vocab_size is fixed at 33 (the ESM protein vocabulary) rather than
        # self.vocab_size, since the folding trunk expects it.
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Runs the common model tests on ESMFold.

    Most common tests are skipped: ESMFold has a single output format, no
    resizable embeddings, no head pruning, and no torchscript support.
    """

    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip("Does not support attention outputs")
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("ESMFold does not support passing input embeds!")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_integration(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_headmasking(self):
        pass

    @unittest.skip("ESMFold does not output hidden states in the normal way.")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("ESMfold does not output hidden states in the normal way.")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip("ESMFold only has one output format.")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip("ESMFold does not support input chunking.")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
    def test_initialization(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_hidden_state(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_simple(self):
        pass

    @unittest.skip("ESMFold doesn't support data parallel.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
298
"""Directory-specific pytest configuration, run automatically before any tests.

Restored from identifier-mangled code: ``git_repo_path`` was assigned to a
mangled name while still being referenced below, and both pytest hooks shared
one mangled name (so the second shadowed the first and pytest could discover
neither).  The hook names follow pytest's plugin hook specification
(``pytest_addoption`` / ``pytest_terminal_summary``).
"""
import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    """pytest hook: register shared transformers command-line options."""
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """pytest hook: emit the shared test-report files when --make-reports is set."""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
298
1
"""chrF(++) metric for `datasets`, wrapping sacrebleu's CHRF implementation.

Restored from identifier-mangled code: the three docstring constants were all
assigned to one mangled name while the decorator and ``_info`` reference
``_CITATION`` / ``_DESCRIPTION`` / ``_KWARGS_DESCRIPTION``, and both methods
shared one mangled name.  Method names follow the ``datasets.Metric``
interface (``_info`` / ``_compute``).
"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF

import datasets


_CITATION = """\
@inproceedings{popovic-2015-chrf,
    title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
    author = "Popovi{\'c}, Maja",
    booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
    month = sep,
    year = "2015",
    address = "Lisbon, Portugal",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W15-3049",
    doi = "10.18653/v1/W15-3049",
    pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
    title = "chr{F}++: words helping character n-grams",
    author = "Popovi{\'c}, Maja",
    booktitle = "Proceedings of the Second Conference on Machine Translation",
    month = sep,
    year = "2017",
    address = "Copenhagen, Denmark",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W17-4770",
    doi = "10.18653/v1/W17-4770",
    pages = "612--618",
}
@inproceedings{post-2018-call,
    title = "A Call for Clarity in Reporting {BLEU} Scores",
    author = "Post, Matt",
    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
    month = oct,
    year = "2018",
    address = "Belgium, Brussels",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W18-6319",
    pages = "186--191",
}
"""

_DESCRIPTION = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.

The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534

See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""

_KWARGS_DESCRIPTION = """
Produces ChrF(++) scores for hypotheses given reference translations.

Args:
    predictions (list of str): The predicted sentences.
    references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
    char_order (int): Character n-gram order. Defaults to `6`.
    word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
    beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
    lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
    whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
    eps_smoothing (bool): If `True`, applies epsilon smoothing similar
    to reference chrF++.py, NLTK and Moses implementations. If `False`,
    it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.

Returns:
    'score' (float): The chrF (chrF++) score,
    'char_order' (int): The character n-gram order,
    'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
    'beta' (int): Determine the importance of recall w.r.t precision

Examples:
    Example 1--a simple example of calculating chrF:
        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
        >>> chrf = datasets.load_metric("chrf")
        >>> results = chrf.compute(predictions=prediction, references=reference)
        >>> print(results)
        {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}

    Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
        >>> chrf = datasets.load_metric("chrf")
        >>> results = chrf.compute(predictions=prediction,
        ...                         references=reference,
        ...                         word_order=2)
        >>> print(results)
        {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}

    Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
        >>> chrf = datasets.load_metric("chrf")
        >>> results = chrf.compute(predictions=prediction,
        ...                         references=reference,
        ...                         word_order=2,
        ...                         lowercase=True)
        >>> print(results)
        {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ChrF(datasets.Metric):
    def _info(self):
        """Describe the metric's inputs, citation and homepage for `datasets`."""
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order=CHRF.CHAR_ORDER,
        word_order=CHRF.WORD_ORDER,
        beta=CHRF.BETA,
        lowercase=False,
        whitespace=False,
        eps_smoothing=False,
    ):
        """Score `predictions` against `references` with sacrebleu's CHRF.

        `references` comes in per-prediction sub-lists; sacrebleu wants the
        transpose (one list per reference position), hence the reshuffle below.
        """
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)

        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
298
"""Tests for the BridgeTower image processor.

NOTE(review): restored from identifier-mangled code (``class A`` / ``a_`` /
``__UpperCamelCase``).  Names were recovered from the references the code
itself makes (``BridgeTowerImageProcessingTester(self)``,
``self.image_processor_tester``, ``self.image_processor_dict``) and from the
shared image-processing test conventions imported above.
"""
import unittest
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import BridgeTowerImageProcessor


class BridgeTowerImageProcessingTester(unittest.TestCase):
    """Provides processor kwargs and expected resize geometry for the tests below."""

    def __init__(
        self,
        parent,
        do_resize=True,
        size=None,
        size_divisor=32,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        do_center_crop=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_pad=True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor is expected to produce.

        Mirrors the processor's shortest-edge resize, the 1333/800 max-size
        cap, and rounding down to a multiple of ``size_divisor``.  For a
        batch, returns the per-axis maximum over the individual images.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
298
1
'''simple docstring''' from __future__ import annotations from collections import deque from collections.abc import Sequence from dataclasses import dataclass from typing import Any @dataclass class A : '''simple docstring''' A = 42 A = None A = None def __lowerCAmelCase ( ): __UpperCamelCase : Any = Node(1 ) __UpperCamelCase : Union[str, Any] = Node(2 ) __UpperCamelCase : str = Node(3 ) __UpperCamelCase : str = Node(4 ) __UpperCamelCase : Any = Node(5 ) return tree def __lowerCAmelCase ( snake_case__ ): return [root.data, *preorder(root.left ), *preorder(root.right )] if root else [] def __lowerCAmelCase ( snake_case__ ): return postorder(root.left ) + postorder(root.right ) + [root.data] if root else [] def __lowerCAmelCase ( snake_case__ ): return [*inorder(root.left ), root.data, *inorder(root.right )] if root else [] def __lowerCAmelCase ( snake_case__ ): return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0 def __lowerCAmelCase ( snake_case__ ): __UpperCamelCase : list[Any] = [] if root is None: return output __UpperCamelCase : Any = deque([root] ) while process_queue: __UpperCamelCase : Union[str, Any] = process_queue.popleft() output.append(node.data ) if node.left: process_queue.append(node.left ) if node.right: process_queue.append(node.right ) return output def __lowerCAmelCase ( snake_case__ , snake_case__ ): __UpperCamelCase : list[Any] = [] def populate_output(snake_case__ , snake_case__ ) -> None: if not root: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.left , level - 1 ) populate_output(root.right , level - 1 ) populate_output(snake_case__ , snake_case__ ) return output def __lowerCAmelCase ( snake_case__ , snake_case__ ): __UpperCamelCase : list[Any] = [] def populate_output(snake_case__ , snake_case__ ) -> None: if root is None: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.right , level - 1 ) populate_output(root.left , level - 1 ) 
populate_output(snake_case__ , snake_case__ ) return output def __lowerCAmelCase ( snake_case__ ): if root is None: return [] __UpperCamelCase : list[Sequence[Node | None]] = [] __UpperCamelCase : List[Any] = 0 __UpperCamelCase : Optional[int] = height(snake_case__ ) for h in range(1 , height_tree + 1 ): if not flag: output.append(get_nodes_from_left_to_right(snake_case__ , snake_case__ ) ) __UpperCamelCase : str = 1 else: output.append(get_nodes_from_right_to_left(snake_case__ , snake_case__ ) ) __UpperCamelCase : Optional[Any] = 0 return output def __lowerCAmelCase ( ): # Main function for testing. __UpperCamelCase : Dict = make_tree() print(F"In-order Traversal: {inorder(snake_case__ )}" ) print(F"Pre-order Traversal: {preorder(snake_case__ )}" ) print(F"Post-order Traversal: {postorder(snake_case__ )}" , "\n" ) print(F"Height of Tree: {height(snake_case__ )}" , "\n" ) print("Complete Level Order Traversal: " ) print(level_order(snake_case__ ) , "\n" ) print("Level-wise order Traversal: " ) for level in range(1 , height(snake_case__ ) + 1 ): print(F"Level {level}:" , get_nodes_from_left_to_right(snake_case__ , level=snake_case__ ) ) print("\nZigZag order Traversal: " ) print(zigzag(snake_case__ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
298
"""Convert an Amazon Bort (gluonnlp/mxnet) checkpoint to a 🤗 BERT checkpoint.

NOTE(review): restored from identifier-mangled code.  The entry point was
renamed back to ``convert_bort_checkpoint_to_pytorch`` (it is called under
``__main__`` by that name); local names were recovered from the surviving
references (``predefined_args``, ``params``, ``tree``-style usages, etc.).
A few literal arguments that the mangler replaced with placeholders
(``_load_vocab``'s second argument, the boolean flags to ``BERTEncoder`` /
``BERTModel`` / ``load_parameters``) were restored from the upstream
conversion script — confirm against it before shipping.
"""
import argparse
import os

import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn

from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.utils import logging


if version.parse(nlp.__version__) != version.parse("0.8.3"):
    raise Exception("requires gluonnlp == 0.8.3")

if version.parse(mx.__version__) != version.parse("1.5.0"):
    raise Exception("requires mxnet == 1.5.0")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"


def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    """Load the Bort params file, map every weight into a BertForMaskedLM,
    save it to `pytorch_dump_folder_path`, and sanity-check that the gluon
    and Transformers models produce (nearly) the same hidden states."""
    # Hyper-parameters of the released bort_4_8_768_1024 architecture.
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()

    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()

    # Parameter mapping (Gluon -> Transformers), * = layer index:
    #   encoder.layer_norm.{beta,gamma}          -> bert.embeddings.LayerNorm.{bias,weight}
    #   encoder.position_weight                  -> bert.embeddings.position_embeddings.weight
    #   word_embed.0.weight                      -> bert.embeddings.word_embeddings.weight
    #   ...transformer_cells.*.attention_cell.proj_{key,query,value}.{bias,weight}
    #                                            -> ...attention.self.{key,query,value}.{bias,weight}
    #   ...transformer_cells.*.proj              -> ...attention.output.dense
    #   ...transformer_cells.*.layer_norm        -> ...attention.output.LayerNorm
    #   ...transformer_cells.*.ffn.ffn_1         -> ...intermediate.dense
    #   ...transformer_cells.*.ffn.ffn_2         -> ...output.dense
    #   ...transformer_cells.*.ffn.layer_norm    -> ...output.LayerNorm

    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )

    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )

    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both model do output the same tensors")
    else:
        print("❌ Both model do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
298
1
'''simple docstring'''
# Lazy-import scaffolding for the LLaMA model family: submodules are only
# imported when one of their public names is first accessed.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)

# Map of submodule name -> public symbols it exports.  Extended below as each
# optional backend (sentencepiece / tokenizers / torch) is detected.
# NOTE: the mangled original reassigned one name (`_lowerCAmelCase`) in every
# branch — discarding the dict — and then passed an undefined
# `_import_structure` to `_LazyModule`, a guaranteed NameError.
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only; mirrors _import_structure.
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    # Replace this module with the lazy proxy so attribute access triggers
    # the deferred submodule imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
298
'''simple docstring'''
# Tests for `datasets`' Apache Beam-based dataset builders: end-to-end build
# to Arrow with the DirectRunner, parquet sharding, and the error raised when
# no beam runner/options are supplied.
#
# NOTE(review): identifiers in this file are machine-mangled.  All classes are
# named `A`, all methods `a_` (later defs shadow earlier ones), assignment
# targets are `__UpperCamelCase` while later statements read names that are
# never bound (`builder`, `dset`, `expected_num_examples`, `tmp_cache_dir`,
# `DummyBeamDataset`, `NestedBeamDataset`), `SCREAMING_SNAKE_CASE__` is
# undefined, and several `def a_ (self , _UpperCAmelCase , _UpperCAmelCase )`
# headers repeat a parameter name — a SyntaxError.  The original identifiers
# must be restored before this module can run; comments below describe the
# visible intent only.
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import datasets
import datasets.config

from .utils import require_beam


class A ( datasets.BeamBasedBuilder ):
    '''simple docstring'''

    # DatasetInfo with a single string "content" feature.
    def a_ (self ) -> Tuple:
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=_UpperCAmelCase , )

    # Single TRAIN split fed from get_test_dummy_examples().
    def a_ (self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )]

    # Beam pipeline that simply materialises the provided examples.
    def a_ (self , _UpperCAmelCase , _UpperCAmelCase ) -> int:
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(_UpperCAmelCase )


class A ( datasets.BeamBasedBuilder ):
    '''simple docstring'''

    # Same builder but with a nested Sequence feature: {"a": {"b": [str]}}.
    def a_ (self ) -> str:
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=_UpperCAmelCase , )

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
        ]

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(_UpperCAmelCase )


# Three flat (index, {"content": str}) fixtures.
def __lowerCAmelCase ( ):
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )]


# Three nested (index, {"a": {"b": [str]}}) fixtures.
# NOTE(review): shares its mangled name with the function above, so the
# earlier fixture is shadowed at module level.
def __lowerCAmelCase ( ):
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )]


class A ( SCREAMING_SNAKE_CASE__ ):
    '''simple docstring'''

    # Build the dummy dataset with the DirectRunner and verify the produced
    # Arrow file, features, row count and dataset_info.json.
    @require_beam
    def a_ (self ) -> Union[str, Any]:
        __UpperCamelCase : Union[str, Any] = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            __UpperCamelCase : str = DummyBeamDataset(cache_dir=_UpperCAmelCase , beam_runner="DirectRunner" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(_UpperCAmelCase , builder.name , "default" , "0.0.0" , f"{builder.name}-train.arrow" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
            __UpperCamelCase : Optional[int] = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , _UpperCAmelCase )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , _UpperCAmelCase )
            self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(_UpperCAmelCase , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset

    # Shard the output into two files by patching WriteToParquet with
    # num_shards=2; order across shards is not guaranteed.
    @require_beam
    def a_ (self ) -> Optional[Any]:
        import apache_beam as beam

        __UpperCamelCase : Optional[int] = beam.io.parquetio.WriteToParquet
        __UpperCamelCase : List[str] = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            __UpperCamelCase : Optional[int] = DummyBeamDataset(cache_dir=_UpperCAmelCase , beam_runner="DirectRunner" )
            with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
                __UpperCamelCase : List[str] = partial(_UpperCAmelCase , num_shards=2 )
                builder.download_and_prepare()
                # NOTE(review): both assertions below check shard
                # ...-00000-of-00002; the second was presumably meant to check
                # ...-00001-of-00002 — confirm against upstream.
                self.assertTrue(
                    os.path.exists(
                        os.path.join(
                            _UpperCAmelCase , builder.name , "default" , "0.0.0" , f"{builder.name}-train-00000-of-00002.arrow" ) ) )
                self.assertTrue(
                    os.path.exists(
                        os.path.join(
                            _UpperCAmelCase , builder.name , "default" , "0.0.0" , f"{builder.name}-train-00000-of-00002.arrow" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
            __UpperCamelCase : List[str] = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , _UpperCAmelCase )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , _UpperCAmelCase )
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
            self.assertTrue(
                os.path.exists(os.path.join(_UpperCAmelCase , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset

    # Without a beam_runner, download_and_prepare must raise MissingBeamOptions.
    @require_beam
    def a_ (self ) -> str:
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            __UpperCamelCase : Optional[Any] = DummyBeamDataset(cache_dir=_UpperCAmelCase )
            self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )

    # Same end-to-end check as the first test, for the nested-features builder.
    @require_beam
    def a_ (self ) -> List[str]:
        __UpperCamelCase : Tuple = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            __UpperCamelCase : str = NestedBeamDataset(cache_dir=_UpperCAmelCase , beam_runner="DirectRunner" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(_UpperCAmelCase , builder.name , "default" , "0.0.0" , f"{builder.name}-train.arrow" ) ) )
            self.assertDictEqual(
                builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
            __UpperCamelCase : Union[str, Any] = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , _UpperCAmelCase )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , _UpperCAmelCase )
            self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(_UpperCAmelCase , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
298
1
"""Generate random graphs represented as adjacency lists."""
import random


def random_graph(vertices_number, probability, directed=False):
    """Build a random graph on `vertices_number` nodes as {node: [neighbours]}.

    Each candidate edge (i, j) with i < j is added with the given
    `probability`; when `directed` is False the reverse edge is mirrored so
    the adjacency stays symmetric.  `probability >= 1` yields the complete
    graph and `probability <= 0` a graph without edges.
    """
    # NOTE: the mangled original declared three parameters with the same name
    # (a SyntaxError) and read `probability`/`graph`/`directed` which were
    # never bound; this restores the intended names.
    graph = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is lower than probability
    # (the original comment said "greater", contradicting the `<` test below)
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i, either
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number):
    """Return the complete graph on `vertices_number` nodes as an adjacency dict."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }


if __name__ == "__main__":
    import doctest

    doctest.testmod()
298
'''simple docstring''' # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.test_utils import execute_subprocess_async def __lowerCAmelCase ( snake_case__=None ): if subparsers is not None: __UpperCamelCase : Any = subparsers.add_parser("test" ) else: __UpperCamelCase : Dict = argparse.ArgumentParser("Accelerate test command" ) parser.add_argument( "--config_file" , default=snake_case__ , help=( "The path to use to store the config file. Will default to a file named default_config.yaml in the cache " "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " "with 'huggingface'." ) , ) if subparsers is not None: parser.set_defaults(func=snake_case__ ) return parser def __lowerCAmelCase ( snake_case__ ): __UpperCamelCase : str = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] ) if args.config_file is None: __UpperCamelCase : str = script_name else: __UpperCamelCase : Tuple = F"--config_file={args.config_file} {script_name}" __UpperCamelCase : Optional[Any] = ["accelerate-launch"] + test_args.split() __UpperCamelCase : Optional[Any] = execute_subprocess_async(snake_case__ , env=os.environ.copy() ) if result.returncode == 0: print("Test is a success! 
You are ready for your distributed training!" ) def __lowerCAmelCase ( ): __UpperCamelCase : int = test_command_parser() __UpperCamelCase : Union[str, Any] = parser.parse_args() test_command(snake_case__ ) if __name__ == "__main__": main()
298
1
'''simple docstring'''
# Flax/JAX diffusion-scheduler utilities: a config-driven scheduler mixin
# (save/load + compatible-class discovery), a cosine beta-schedule helper, the
# common scheduler state (betas / alphas / cumulative alphas) and the
# add-noise / velocity computations used by Flax schedulers.
#
# NOTE(review): identifiers are machine-mangled — every class is `A`, every
# method `a_`, assignment targets are `__UpperCamelCase` while later code
# reads names that are never bound (`scheduler`, `state`, `betas`, `config`,
# `compatible_classes_str`, ...), `SCHEDULER_CONFIG_NAME` and
# `SCREAMING_SNAKE_CASE__` are undefined, and several headers repeat a
# parameter name or annotate a tuple target — both SyntaxErrors.  The
# original identifiers must be restored before this module can run; comments
# below describe the visible intent only.
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union

import flax
import jax.numpy as jnp

from ..utils import BaseOutput


_lowerCAmelCase = '''scheduler_config.json'''


# Enum of the Karras-style diffusion scheduler variants (5 members; the
# mangled member names all collapsed to `A`, so only the last survives).
class A ( SCREAMING_SNAKE_CASE__ ):
    '''simple docstring'''

    A = 1
    A = 2
    A = 3
    A = 4
    A = 5


# Scheduler output dataclass wrapping a single array field.
@dataclass
class A ( SCREAMING_SNAKE_CASE__ ):
    '''simple docstring'''

    A = 42


# Scheduler mixin: from_pretrained / save_pretrained built on the shared
# config machinery, plus discovery of compatible scheduler classes.
class A :
    '''simple docstring'''

    A = SCHEDULER_CONFIG_NAME
    A = ["dtype"]
    A = []
    A = True

    # from_pretrained: load the config, build the scheduler, optionally
    # create its functional state, and return unused kwargs on request.
    @classmethod
    def a_ (cls , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase=False , **_UpperCAmelCase , ) -> List[Any]:
        __UpperCamelCase , __UpperCamelCase : List[Any] = cls.load_config(
            pretrained_model_name_or_path=_UpperCAmelCase , subfolder=_UpperCAmelCase , return_unused_kwargs=_UpperCAmelCase , **_UpperCAmelCase , )
        __UpperCamelCase , __UpperCamelCase : Optional[Any] = cls.from_config(_UpperCAmelCase , return_unused_kwargs=_UpperCAmelCase , **_UpperCAmelCase )
        if hasattr(_UpperCAmelCase , "create_state" ) and getattr(_UpperCAmelCase , "has_state" , _UpperCAmelCase ):
            __UpperCamelCase : Optional[Any] = scheduler.create_state()
        if return_unused_kwargs:
            return scheduler, state, unused_kwargs
        return scheduler, state

    # save_pretrained: delegate to the config-saving machinery.
    def a_ (self , _UpperCAmelCase , _UpperCAmelCase = False , **_UpperCAmelCase ) -> Optional[int]:
        self.save_config(save_directory=_UpperCAmelCase , push_to_hub=_UpperCAmelCase , **_UpperCAmelCase )

    @property
    def a_ (self ) -> Tuple:
        return self._get_compatibles()

    # Resolve compatible scheduler classes by name from the package root.
    @classmethod
    def a_ (cls ) -> Tuple:
        __UpperCamelCase : str = list(set([cls.__name__] + cls._compatibles ) )
        __UpperCamelCase : Dict = importlib.import_module(__name__.split("." )[0] )
        __UpperCamelCase : Tuple = [
            getattr(_UpperCAmelCase , _UpperCAmelCase ) for c in compatible_classes_str if hasattr(_UpperCAmelCase , _UpperCAmelCase )
        ]
        return compatible_classes


# Right-pad x with singleton dims so it broadcasts against `shape`.
def __lowerCAmelCase ( snake_case__ , snake_case__ ):
    assert len(snake_case__ ) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(snake_case__ ) - x.ndim) ) , snake_case__ )


# betas_for_alpha_bar: squaredcos_cap_v2 (Glide cosine) beta schedule.
def __lowerCAmelCase ( snake_case__ , snake_case__=0.999 , snake_case__=jnp.floataa ):
    def alpha_bar(snake_case__ ):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2

    __UpperCamelCase : Optional[Any] = []
    for i in range(snake_case__ ):
        __UpperCamelCase : Optional[Any] = i / num_diffusion_timesteps
        __UpperCamelCase : Tuple = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(snake_case__ ) / alpha_bar(snake_case__ ) , snake_case__ ) )
    return jnp.array(snake_case__ , dtype=snake_case__ )


# Common per-scheduler state: betas/alphas/alphas_cumprod derived from the
# scheduler's config (trained betas, linear, scaled_linear or cosine).
@flax.struct.dataclass
class A :
    '''simple docstring'''

    A = 42
    A = 42
    A = 42

    @classmethod
    def a_ (cls , _UpperCAmelCase ) -> Union[str, Any]:
        __UpperCamelCase : Any = scheduler.config
        if config.trained_betas is not None:
            __UpperCamelCase : List[Any] = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
        elif config.beta_schedule == "linear":
            __UpperCamelCase : Any = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            __UpperCamelCase : Tuple = (
                jnp.linspace(
                    config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            __UpperCamelCase : Tuple = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}" )
        __UpperCamelCase : Optional[Any] = 1.0 - betas
        __UpperCamelCase : Union[str, Any] = jnp.cumprod(_UpperCAmelCase , axis=0 )
        return cls(
            alphas=_UpperCAmelCase , betas=_UpperCAmelCase , alphas_cumprod=_UpperCAmelCase , )


# get_sqrt_alpha_prod: sqrt(alpha_cumprod[t]) and sqrt(1 - alpha_cumprod[t]),
# flattened then broadcast back to the sample's shape.
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
    __UpperCamelCase : Optional[Any] = state.alphas_cumprod
    __UpperCamelCase : List[Any] = alphas_cumprod[timesteps] ** 0.5
    __UpperCamelCase : str = sqrt_alpha_prod.flatten()
    __UpperCamelCase : Any = broadcast_to_shape_from_left(snake_case__ , original_samples.shape )
    __UpperCamelCase : Dict = (1 - alphas_cumprod[timesteps]) ** 0.5
    __UpperCamelCase : Optional[Any] = sqrt_one_minus_alpha_prod.flatten()
    __UpperCamelCase : Any = broadcast_to_shape_from_left(snake_case__ , original_samples.shape )
    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


# add_noise (forward diffusion): noisy = sqrt(a) * x0 + sqrt(1 - a) * noise.
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
    __UpperCamelCase , __UpperCamelCase : Union[str, Any] = get_sqrt_alpha_prod(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
    __UpperCamelCase : str = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


# get_velocity (v-prediction target): v = sqrt(a) * noise - sqrt(1 - a) * x0.
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
    __UpperCamelCase , __UpperCamelCase : List[Any] = get_sqrt_alpha_prod(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
    __UpperCamelCase : List[str] = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
298
'''simple docstring'''
# Unit tests for BlenderbotSmallTokenizer: BPE tokenisation against a tiny
# hand-built vocab/merges pair, plus integration checks against the
# facebook/blenderbot-90M checkpoint.
#
# NOTE(review): identifiers are machine-mangled — the test class is `A`, all
# methods are `a_` (later defs shadow earlier ones in the class namespace),
# `SCREAMING_SNAKE_CASE__` is undefined, and assignment targets
# `__UpperCamelCase` never match the names read afterwards (`tokenizer`,
# `tok`, `src_text`, ...).  The original identifiers must be restored before
# this module can run; comments describe the visible intent only.
import json
import os
import unittest

from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
    VOCAB_FILES_NAMES,
    BlenderbotSmallTokenizer,
)

from ...test_tokenization_common import TokenizerTesterMixin


class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    '''simple docstring'''

    A = BlenderbotSmallTokenizer
    A = False

    # setUp: write a tiny vocab.json / merges.txt pair into tmpdirname.
    def a_ (self ) -> List[str]:
        super().setUp()
        __UpperCamelCase : Optional[Any] = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        __UpperCamelCase : int = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
        __UpperCamelCase : Any = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        __UpperCamelCase : int = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
        __UpperCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        __UpperCamelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(_UpperCAmelCase ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(_UpperCAmelCase ) )

    # Tokenizer factory used by the common tester mixin.
    def a_ (self , **_UpperCAmelCase ) -> Dict:
        kwargs.update(self.special_tokens_map )
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )

    # Input/expected-output pair for the mixin's round-trip checks.
    def a_ (self , _UpperCAmelCase ) -> str:
        __UpperCamelCase : List[Any] = "adapt act apte"
        __UpperCamelCase : Dict = "adapt act apte"
        return input_text, output_text

    # BPE split of "adapt act apte" and its id sequence with bos/eos tokens.
    def a_ (self ) -> int:
        __UpperCamelCase : List[str] = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        __UpperCamelCase : str = "adapt act apte"
        __UpperCamelCase : List[str] = ["adapt", "act", "ap@@", "te"]
        __UpperCamelCase : Union[str, Any] = tokenizer.tokenize(_UpperCAmelCase )
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
        __UpperCamelCase : Dict = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        __UpperCamelCase : Any = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )

    # Integration: the pretrained 90M checkpoint lowercases and re-spaces
    # "I am a small frog." on an encode/decode round trip.
    def a_ (self ) -> int:
        __UpperCamelCase : Optional[int] = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
        assert tok("sam" ).input_ids == [1_3_8_4]
        __UpperCamelCase : Dict = "I am a small frog."
        __UpperCamelCase : Any = tok([src_text] , padding=_UpperCAmelCase , truncation=_UpperCAmelCase )["input_ids"]
        __UpperCamelCase : Optional[Any] = tok.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    # Integration: a sentence-final " ." encodes to the same id as a bare ".".
    def a_ (self ) -> List[Any]:
        __UpperCamelCase : Dict = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
        __UpperCamelCase : Tuple = "I am a small frog ."
        __UpperCamelCase : List[str] = "."
        __UpperCamelCase : Any = tok(_UpperCAmelCase )["input_ids"]
        __UpperCamelCase : Optional[Any] = tok(_UpperCAmelCase )["input_ids"]
        assert encoded[-1] == encoded_dot[0]
298
1
"""Render the Mandelbrot set, with optional hue-coded colouring."""
# NOTE: the mangled original defined all four functions under one name with
# duplicate parameters (a SyntaxError) and called helpers / read `img` under
# names that were never bound; the intended names are restored below.
import colorsys


def get_distance(figure_x, figure_y, max_step):
    """Return a normalised escape "distance" in [0, 1] for point (x, y).

    Iterates z -> z**2 + c up to `max_step` times; points that never diverge
    (|z|**2 <= 4 throughout) map to 1.0, immediate escapees to 0.0.
    """
    a = figure_x
    b = figure_y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + figure_x
        b = 2 * a * b + figure_y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance):
    """Black for points inside the set (distance == 1), white otherwise."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance):
    """Black inside the set; otherwise an RGB tuple with hue = distance."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width=800,
    image_height=600,
    figure_center_x=-0.6,
    figure_center_y=0,
    figure_width=3.2,
    max_step=50,
    use_distance_color_coding=True,
):
    """Render the Mandelbrot set into a new PIL RGB image and return it.

    The complex-plane window is centred on (figure_center_x, figure_center_y)
    with width `figure_width`; the height follows from the image aspect ratio.
    """
    # Imported lazily so the pure-math helpers above work without Pillow.
    from PIL import Image  # type: ignore

    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # The figure height depends only on the overall geometry, so compute it
    # once instead of once per pixel.
    figure_height = figure_width / image_width * image_height

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x=-0.6, figure_center_y=-0.4, figure_width=0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding=False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
298
'''simple docstring''' from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig _lowerCAmelCase = logging.get_logger(__name__) # General docstring _lowerCAmelCase = '''RegNetConfig''' # Base docstring _lowerCAmelCase = '''facebook/regnet-y-040''' _lowerCAmelCase = [1, 1088, 7, 7] # Image classification docstring _lowerCAmelCase = '''facebook/regnet-y-040''' _lowerCAmelCase = '''tabby, tabby cat''' _lowerCAmelCase = [ '''facebook/regnet-y-040''', # See all regnet models at https://huggingface.co/models?filter=regnet ] class A ( tf.keras.layers.Layer ): '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase = 3 , _UpperCAmelCase = 1 , _UpperCAmelCase = 1 , _UpperCAmelCase = "relu" , **_UpperCAmelCase , ) -> Optional[int]: super().__init__(**_UpperCAmelCase ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb __UpperCamelCase : List[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) __UpperCamelCase : Tuple = tf.keras.layers.ConvaD( filters=_UpperCAmelCase , kernel_size=_UpperCAmelCase , strides=_UpperCAmelCase , padding="VALID" , groups=_UpperCAmelCase , use_bias=_UpperCAmelCase , name="convolution" , ) __UpperCamelCase : int = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" ) __UpperCamelCase : List[str] = ACTaFN[activation] if activation is not None else tf.identity def a_ (self 
, _UpperCAmelCase ) -> Dict: __UpperCamelCase : str = self.convolution(self.padding(_UpperCAmelCase ) ) __UpperCamelCase : Dict = self.normalization(_UpperCAmelCase ) __UpperCamelCase : Dict = self.activation(_UpperCAmelCase ) return hidden_state class A ( tf.keras.layers.Layer ): '''simple docstring''' def __init__(self , _UpperCAmelCase , **_UpperCAmelCase ) -> Optional[Any]: super().__init__(**_UpperCAmelCase ) __UpperCamelCase : Any = config.num_channels __UpperCamelCase : str = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , ) def a_ (self , _UpperCAmelCase ) -> Tuple: __UpperCamelCase : Dict = shape_list(_UpperCAmelCase )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) __UpperCamelCase : Any = tf.transpose(_UpperCAmelCase , perm=(0, 2, 3, 1) ) __UpperCamelCase : List[Any] = self.embedder(_UpperCAmelCase ) return hidden_state class A ( tf.keras.layers.Layer ): '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase = 2 , **_UpperCAmelCase ) -> Any: super().__init__(**_UpperCAmelCase ) __UpperCamelCase : Any = tf.keras.layers.ConvaD( filters=_UpperCAmelCase , kernel_size=1 , strides=_UpperCAmelCase , use_bias=_UpperCAmelCase , name="convolution" ) __UpperCamelCase : Tuple = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase = False ) -> tf.Tensor: return self.normalization(self.convolution(_UpperCAmelCase ) , training=_UpperCAmelCase ) class A ( tf.keras.layers.Layer ): '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) -> Any: super().__init__(**_UpperCAmelCase ) __UpperCamelCase : List[str] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_UpperCAmelCase , name="pooler" ) __UpperCamelCase : Optional[Any] = [ tf.keras.layers.ConvaD(filters=_UpperCAmelCase , kernel_size=1 , activation="relu" , name="attention.0" ), tf.keras.layers.ConvaD(filters=_UpperCAmelCase , kernel_size=1 , activation="sigmoid" , name="attention.2" ), ] def a_ (self , _UpperCAmelCase ) -> Tuple: # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] __UpperCamelCase : List[str] = self.pooler(_UpperCAmelCase ) for layer_module in self.attention: __UpperCamelCase : str = layer_module(_UpperCAmelCase ) __UpperCamelCase : List[Any] = hidden_state * pooled return hidden_state class A ( tf.keras.layers.Layer ): '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1 , **_UpperCAmelCase ) -> int: super().__init__(**_UpperCAmelCase ) __UpperCamelCase : List[Any] = 
in_channels != out_channels or stride != 1 __UpperCamelCase : List[str] = max(1 , out_channels // config.groups_width ) __UpperCamelCase : List[Any] = ( TFRegNetShortCut(_UpperCAmelCase , stride=_UpperCAmelCase , name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" , name="shortcut" ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. __UpperCamelCase : Optional[Any] = [ TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ), TFRegNetConvLayer( _UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , activation=config.hidden_act , name="layer.1" ), TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase , name="layer.2" ), ] __UpperCamelCase : Dict = ACTaFN[config.hidden_act] def a_ (self , _UpperCAmelCase ) -> Union[str, Any]: __UpperCamelCase : List[Any] = hidden_state for layer_module in self.layers: __UpperCamelCase : Dict = layer_module(_UpperCAmelCase ) __UpperCamelCase : List[Any] = self.shortcut(_UpperCAmelCase ) hidden_state += residual __UpperCamelCase : Tuple = self.activation(_UpperCAmelCase ) return hidden_state class A ( tf.keras.layers.Layer ): '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1 , **_UpperCAmelCase ) -> Any: super().__init__(**_UpperCAmelCase ) __UpperCamelCase : str = in_channels != out_channels or stride != 1 __UpperCamelCase : Optional[int] = max(1 , out_channels // config.groups_width ) __UpperCamelCase : Union[str, Any] = ( TFRegNetShortCut(_UpperCAmelCase , stride=_UpperCAmelCase , name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" , name="shortcut" ) ) __UpperCamelCase : Union[str, Any] = [ TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ), TFRegNetConvLayer( _UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , 
activation=config.hidden_act , name="layer.1" ), TFRegNetSELayer(_UpperCAmelCase , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ), TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase , name="layer.3" ), ] __UpperCamelCase : Union[str, Any] = ACTaFN[config.hidden_act] def a_ (self , _UpperCAmelCase ) -> int: __UpperCamelCase : str = hidden_state for layer_module in self.layers: __UpperCamelCase : Any = layer_module(_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = self.shortcut(_UpperCAmelCase ) hidden_state += residual __UpperCamelCase : Union[str, Any] = self.activation(_UpperCAmelCase ) return hidden_state class A ( tf.keras.layers.Layer ): '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , **_UpperCAmelCase ) -> int: super().__init__(**_UpperCAmelCase ) __UpperCamelCase : List[str] = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer __UpperCamelCase : Tuple = [ # downsampling is done in the first layer with stride of 2 layer(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , name="layers.0" ), *[layer(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , name=f"layers.{i+1}" ) for i in range(depth - 1 )], ] def a_ (self , _UpperCAmelCase ) -> Any: for layer_module in self.layers: __UpperCamelCase : Dict = layer_module(_UpperCAmelCase ) return hidden_state class A ( tf.keras.layers.Layer ): '''simple docstring''' def __init__(self , _UpperCAmelCase , **_UpperCAmelCase ) -> str: super().__init__(**_UpperCAmelCase ) __UpperCamelCase : Dict = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( _UpperCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) ) __UpperCamelCase : Union[str, Any] = 
zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(_UpperCAmelCase , config.depths[1:] ) ): self.stages.append(TFRegNetStage(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , depth=_UpperCAmelCase , name=f"stages.{i+1}" ) ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase = False , _UpperCAmelCase = True ) -> TFBaseModelOutputWithNoAttention: __UpperCamelCase : List[Any] = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: __UpperCamelCase : Any = hidden_states + (hidden_state,) __UpperCamelCase : Any = stage_module(_UpperCAmelCase ) if output_hidden_states: __UpperCamelCase : List[Any] = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=_UpperCAmelCase , hidden_states=_UpperCAmelCase ) @keras_serializable class A ( tf.keras.layers.Layer ): '''simple docstring''' A = RegNetConfig def __init__(self , _UpperCAmelCase , **_UpperCAmelCase ) -> List[Any]: super().__init__(**_UpperCAmelCase ) __UpperCamelCase : Optional[int] = config __UpperCamelCase : List[Any] = TFRegNetEmbeddings(_UpperCAmelCase , name="embedder" ) __UpperCamelCase : Union[str, Any] = TFRegNetEncoder(_UpperCAmelCase , name="encoder" ) __UpperCamelCase : Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_UpperCAmelCase , name="pooler" ) @unpack_inputs def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention: __UpperCamelCase : Optional[int] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __UpperCamelCase : Dict = return_dict if return_dict is not None else self.config.use_return_dict __UpperCamelCase : Union[str, Any] = self.embedder(_UpperCAmelCase , training=_UpperCAmelCase ) __UpperCamelCase : str 
= self.encoder( _UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase , training=_UpperCAmelCase ) __UpperCamelCase : List[str] = encoder_outputs[0] __UpperCamelCase : Tuple = self.pooler(_UpperCAmelCase ) # Change to NCHW output format have uniformity in the modules __UpperCamelCase : List[str] = tf.transpose(_UpperCAmelCase , perm=(0, 3, 1, 2) ) __UpperCamelCase : List[Any] = tf.transpose(_UpperCAmelCase , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: __UpperCamelCase : List[str] = tuple([tf.transpose(_UpperCAmelCase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=_UpperCAmelCase , pooler_output=_UpperCAmelCase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' A = RegNetConfig A = "regnet" A = "pixel_values" @property def a_ (self ) -> List[Any]: return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )} _lowerCAmelCase = R''' Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. ''' _lowerCAmelCase = R''' Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. 
See [`ConveNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. ''' @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." , SCREAMING_SNAKE_CASE__ , ) class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__(self , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) -> Tuple: super().__init__(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = TFRegNetMainLayer(_UpperCAmelCase , name="regnet" ) @unpack_inputs @add_start_docstrings_to_model_forward(_UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]: __UpperCamelCase : List[str] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __UpperCamelCase : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict __UpperCamelCase : Tuple = self.regnet( pixel_values=_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase , training=_UpperCAmelCase , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , SCREAMING_SNAKE_CASE__ , ) class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__(self , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) -> int: super().__init__(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = config.num_labels __UpperCamelCase : Any = TFRegNetMainLayer(_UpperCAmelCase , name="regnet" ) # classification head __UpperCamelCase : List[str] = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(_UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def a_ (self , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: __UpperCamelCase : Dict = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __UpperCamelCase : str = return_dict if return_dict is not None else self.config.use_return_dict __UpperCamelCase : Dict = self.regnet( _UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase , training=_UpperCAmelCase ) __UpperCamelCase : Union[str, Any] = outputs.pooler_output if return_dict else outputs[1] __UpperCamelCase : List[str] = self.classifier[0](_UpperCAmelCase ) __UpperCamelCase : Optional[int] = self.classifier[1](_UpperCAmelCase ) __UpperCamelCase : str = None if labels is None else self.hf_compute_loss(labels=_UpperCAmelCase , logits=_UpperCAmelCase ) if not return_dict: __UpperCamelCase : Union[str, Any] = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=_UpperCAmelCase , 
logits=_UpperCAmelCase , hidden_states=outputs.hidden_states )
298
1
'''simple docstring''' import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import SchedulerMixin, SchedulerOutput class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): '''simple docstring''' A = 1 @register_to_config def __init__(self , _UpperCAmelCase = 1_0_0_0 , _UpperCAmelCase = None ) -> str: # set `betas`, `alphas`, `timesteps` self.set_timesteps(_UpperCAmelCase ) # standard deviation of the initial noise distribution __UpperCamelCase : Optional[int] = 1.0 # For now we only support F-PNDM, i.e. the runge-kutta method # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf # mainly at formula (9), (12), (13) and the Algorithm 2. __UpperCamelCase : List[Any] = 4 # running values __UpperCamelCase : List[Any] = [] def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None ) -> Dict: __UpperCamelCase : Optional[Any] = num_inference_steps __UpperCamelCase : Any = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1] __UpperCamelCase : Union[str, Any] = torch.cat([steps, torch.tensor([0.0] )] ) if self.config.trained_betas is not None: __UpperCamelCase : Tuple = torch.tensor(self.config.trained_betas , dtype=torch.floataa ) else: __UpperCamelCase : Union[str, Any] = torch.sin(steps * math.pi / 2 ) ** 2 __UpperCamelCase : Optional[int] = (1.0 - self.betas**2) ** 0.5 __UpperCamelCase : int = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1] __UpperCamelCase : Tuple = timesteps.to(_UpperCAmelCase ) __UpperCamelCase : Any = [] def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True , ) -> Union[SchedulerOutput, Tuple]: if self.num_inference_steps is None: raise ValueError( "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" ) __UpperCamelCase : Optional[int] = (self.timesteps == 
timestep).nonzero().item() __UpperCamelCase : str = timestep_index + 1 __UpperCamelCase : List[Any] = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index] self.ets.append(_UpperCAmelCase ) if len(self.ets ) == 1: __UpperCamelCase : Dict = self.ets[-1] elif len(self.ets ) == 2: __UpperCamelCase : List[Any] = (3 * self.ets[-1] - self.ets[-2]) / 2 elif len(self.ets ) == 3: __UpperCamelCase : Optional[int] = (2_3 * self.ets[-1] - 1_6 * self.ets[-2] + 5 * self.ets[-3]) / 1_2 else: __UpperCamelCase : int = (1 / 2_4) * (5_5 * self.ets[-1] - 5_9 * self.ets[-2] + 3_7 * self.ets[-3] - 9 * self.ets[-4]) __UpperCamelCase : Optional[int] = self._get_prev_sample(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=_UpperCAmelCase ) def a_ (self , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) -> torch.FloatTensor: return sample def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]: __UpperCamelCase : str = self.alphas[timestep_index] __UpperCamelCase : Union[str, Any] = self.betas[timestep_index] __UpperCamelCase : Optional[int] = self.alphas[prev_timestep_index] __UpperCamelCase : List[str] = self.betas[prev_timestep_index] __UpperCamelCase : List[str] = (sample - sigma * ets) / max(_UpperCAmelCase , 1E-8 ) __UpperCamelCase : Tuple = next_alpha * pred + ets * next_sigma return prev_sample def __len__(self ) -> Any: return self.config.num_train_timesteps
298
"""DeeBERT: BERT with early-exit ("highway") classifiers after every layer.

Fixes applied versus the corrupted original: every class was named `A` and
every method `a_`, while the code itself refers to `DeeBertEncoder`,
`BertHighway`, `HighwayException` and `DeeBertModel` by name (NameErrors), and
the duplicate `a_` definitions shadowed each other. Base classes obfuscated to
the undefined `SCREAMING_SNAKE_CASE__` are restored to the imported
`BertPreTrainedModel` / `Exception`.
"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
    BERT_INPUTS_DOCSTRING,
    BERT_START_DOCSTRING,
    BertEmbeddings,
    BertLayer,
    BertPooler,
    BertPreTrainedModel,
)


def entropy(x):
    """Entropy of a pre-softmax logit tensor, computed row-wise over dim 1."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A


class DeeBertEncoder(nn.Module):
    """BERT encoder with one highway (early-exit) classifier per layer."""

    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        # -1 disables early exit at that layer (entropy can never be below -1).
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        """Set per-layer entropy thresholds; a scalar is broadcast to all layers.

        (Method name presumed from usage conventions — the original name was
        obfuscated and no caller is visible in this chunk.)
        """
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        """Copy the main model pooler's weights into every highway pooler."""
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        """Run all layers; at inference, raise HighwayException to exit early
        when a highway classifier's entropy drops below its threshold."""
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)  # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    # Confident enough: abort the forward pass via exception;
                    # the caller catches HighwayException and uses these outputs.
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits


@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """Prune heads of the model. heads_to_prune: {layer_num: [heads]}."""
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions
        # [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
            attention_mask, input_shape, device
        )

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]

        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed: 1.0 in head_mask means keep the head.
        # Converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length].
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[1:]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits


class HighwayException(Exception):
    """Control-flow exception carrying an early-exit's outputs and layer index."""

    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!


class BertHighway(nn.Module):
    """A highway (early-exit) head: pooler + dropout + linear classifier."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        return logits, pooled_output


@add_start_docstrings(
    "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n"
    " also takes care of multi-layer training. ",
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # The encoder exited early: its partial outputs travel in the exception.
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []

        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                # exclude the final highway, of course
                outputs = (sum(highway_losses[:-1]),) + outputs
            else:
                outputs = (loss,) + outputs

        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
298
1
"""Pipeline tests for mask generation (SAM).

Fixes applied versus the corrupted original:
- `hashlib.mda` -> `hashlib.md5` (no `mda` constructor exists in hashlib).
- The result accumulator was assigned to a throwaway obfuscated name but then
  incremented as `new_outupt` -> NameError at runtime; both now use one name.
- The no-PIL fallback class was named `A` with a method `a_`; restored to
  `Image.open` so attribute access matches the real PIL fallback.
- All test methods were named `a_`, so later definitions clobbered earlier
  ones; descriptive unique names restored.
"""
import hashlib
import unittest
from typing import Dict

import numpy as np

from transformers import (
    MODEL_FOR_MASK_GENERATION_MAPPING,
    TF_MODEL_FOR_MASK_GENERATION_MAPPING,
    is_vision_available,
    pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)


if is_vision_available():
    from PIL import Image
else:

    class Image:
        """Minimal stand-in so `Image.open` references don't fail without PIL."""

        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image) -> str:
    """Return a short (10 hex chars) MD5 digest of the image's raw bytes."""
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask) -> Dict:
    """Condense a mask image into a comparable {hash, shape} dict."""
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}


@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        # Generic pipeline test harness hook; nothing extra to check here.
        pass

    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")

        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256
        )

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.021},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
                {"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.9967},
                {"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.993},
                {"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.9909},
                {"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.9879},
                {"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.9834},
                {"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.9716},
                {"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.9612},
                {"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.9599},
                {"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.9552},
                {"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.9532},
                {"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.9516},
                {"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.9499},
                {"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.9483},
                {"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.9464},
                {"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.943},
                {"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.943},
                {"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.9408},
                {"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.9335},
                {"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.9326},
                {"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.9262},
                {"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.8999},
                {"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.8986},
                {"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.8984},
                {"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.8873},
                {"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.8871},
            ],
        )
        # fmt: on

    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)

        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0210},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
            ],
        )
298
'''simple docstring''' import os from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home _lowerCAmelCase = HUGGINGFACE_HUB_CACHE _lowerCAmelCase = '''config.json''' _lowerCAmelCase = '''diffusion_pytorch_model.bin''' _lowerCAmelCase = '''diffusion_flax_model.msgpack''' _lowerCAmelCase = '''model.onnx''' _lowerCAmelCase = '''diffusion_pytorch_model.safetensors''' _lowerCAmelCase = '''weights.pb''' _lowerCAmelCase = '''https://huggingface.co''' _lowerCAmelCase = default_cache_path _lowerCAmelCase = '''diffusers_modules''' _lowerCAmelCase = os.getenv('''HF_MODULES_CACHE''', os.path.join(hf_cache_home, '''modules''')) _lowerCAmelCase = ['''fp16''', '''non-ema'''] _lowerCAmelCase = '''.self_attn'''
298
1
'''simple docstring''' import argparse import collections import os import re import tempfile import pandas as pd from datasets import Dataset from huggingface_hub import hf_hub_download, upload_folder from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/update_metadata.py _lowerCAmelCase = '''src/transformers''' # This is to make sure the transformers module imported is the one in the repo. _lowerCAmelCase = direct_transformers_import(TRANSFORMERS_PATH) # Regexes that match TF/Flax/PT model names. _lowerCAmelCase = re.compile(R'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''') _lowerCAmelCase = re.compile(R'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''') # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. _lowerCAmelCase = re.compile(R'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''') # Fill this with tuples (pipeline_tag, model_mapping, auto_model) _lowerCAmelCase = [ ('''pretraining''', '''MODEL_FOR_PRETRAINING_MAPPING_NAMES''', '''AutoModelForPreTraining'''), ('''feature-extraction''', '''MODEL_MAPPING_NAMES''', '''AutoModel'''), ('''audio-classification''', '''MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForAudioClassification'''), ('''text-generation''', '''MODEL_FOR_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForCausalLM'''), ('''automatic-speech-recognition''', '''MODEL_FOR_CTC_MAPPING_NAMES''', '''AutoModelForCTC'''), ('''image-classification''', '''MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForImageClassification'''), ('''image-segmentation''', '''MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES''', '''AutoModelForImageSegmentation'''), ('''fill-mask''', '''MODEL_FOR_MASKED_LM_MAPPING_NAMES''', '''AutoModelForMaskedLM'''), ('''object-detection''', '''MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES''', 
'''AutoModelForObjectDetection'''), ( '''zero-shot-object-detection''', '''MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES''', '''AutoModelForZeroShotObjectDetection''', ), ('''question-answering''', '''MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForQuestionAnswering'''), ('''text2text-generation''', '''MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForSeq2SeqLM'''), ('''text-classification''', '''MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForSequenceClassification'''), ('''automatic-speech-recognition''', '''MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES''', '''AutoModelForSpeechSeq2Seq'''), ( '''table-question-answering''', '''MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForTableQuestionAnswering''', ), ('''token-classification''', '''MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForTokenClassification'''), ('''multiple-choice''', '''MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES''', '''AutoModelForMultipleChoice'''), ( '''next-sentence-prediction''', '''MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES''', '''AutoModelForNextSentencePrediction''', ), ( '''audio-frame-classification''', '''MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForAudioFrameClassification''', ), ('''audio-xvector''', '''MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES''', '''AutoModelForAudioXVector'''), ( '''document-question-answering''', '''MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForDocumentQuestionAnswering''', ), ( '''visual-question-answering''', '''MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForVisualQuestionAnswering''', ), ('''image-to-text''', '''MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES''', '''AutoModelForVision2Seq'''), ( '''zero-shot-image-classification''', '''MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForZeroShotImageClassification''', ), ('''depth-estimation''', '''MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES''', 
'''AutoModelForDepthEstimation'''), ('''video-classification''', '''MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForVideoClassification'''), ('''mask-generation''', '''MODEL_FOR_MASK_GENERATION_MAPPING_NAMES''', '''AutoModelForMaskGeneration'''), ] def __lowerCAmelCase ( snake_case__ ): __UpperCamelCase : Any = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)" , snake_case__ ) return [m.group(0 ) for m in matches] def __lowerCAmelCase ( ): __UpperCamelCase : Dict = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES __UpperCamelCase : Any = { config.replace("Config" , "" ): model_type for model_type, config in config_maping_names.items() } # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax. __UpperCamelCase : List[Any] = collections.defaultdict(snake_case__ ) __UpperCamelCase : Tuple = collections.defaultdict(snake_case__ ) __UpperCamelCase : List[str] = collections.defaultdict(snake_case__ ) # Let's lookup through all transformers object (once) and find if models are supported by a given backend. 
for attr_name in dir(snake_case__ ): __UpperCamelCase : Tuple = None if _re_tf_models.match(snake_case__ ) is not None: __UpperCamelCase : Dict = tf_models __UpperCamelCase : str = _re_tf_models.match(snake_case__ ).groups()[0] elif _re_flax_models.match(snake_case__ ) is not None: __UpperCamelCase : str = flax_models __UpperCamelCase : Any = _re_flax_models.match(snake_case__ ).groups()[0] elif _re_pt_models.match(snake_case__ ) is not None: __UpperCamelCase : Optional[int] = pt_models __UpperCamelCase : Any = _re_pt_models.match(snake_case__ ).groups()[0] if lookup_dict is not None: while len(snake_case__ ) > 0: if attr_name in model_prefix_to_model_type: __UpperCamelCase : Tuple = True break # Try again after removing the last word in the name __UpperCamelCase : int = "".join(camel_case_split(snake_case__ )[:-1] ) __UpperCamelCase : str = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) ) __UpperCamelCase : str = list(snake_case__ ) all_models.sort() __UpperCamelCase : Union[str, Any] = {"model_type": all_models} __UpperCamelCase : str = [pt_models[t] for t in all_models] __UpperCamelCase : int = [tf_models[t] for t in all_models] __UpperCamelCase : List[Any] = [flax_models[t] for t in all_models] # Now let's use the auto-mapping names to make sure __UpperCamelCase : List[str] = {} for t in all_models: if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES: __UpperCamelCase : int = "AutoProcessor" elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES: __UpperCamelCase : Optional[Any] = "AutoTokenizer" elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES: __UpperCamelCase : Tuple = "AutoFeatureExtractor" else: # Default to AutoTokenizer if a model has nothing, for backward compatibility. 
__UpperCamelCase : Optional[int] = "AutoTokenizer" __UpperCamelCase : Optional[int] = [processors[t] for t in all_models] return pd.DataFrame(snake_case__ ) def __lowerCAmelCase ( snake_case__ ): __UpperCamelCase : str = [ transformers_module.models.auto.modeling_auto, transformers_module.models.auto.modeling_tf_auto, transformers_module.models.auto.modeling_flax_auto, ] for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS: __UpperCamelCase : List[Any] = [model_mapping, F"TF_{model_mapping}", F"FLAX_{model_mapping}"] __UpperCamelCase : List[Any] = [auto_class, F"TF_{auto_class}", F"Flax_{auto_class}"] # Loop through all three frameworks for module, cls, mapping in zip(snake_case__ , snake_case__ , snake_case__ ): # The type of pipeline may not exist in this framework if not hasattr(snake_case__ , snake_case__ ): continue # First extract all model_names __UpperCamelCase : List[Any] = [] for name in getattr(snake_case__ , snake_case__ ).values(): if isinstance(snake_case__ , snake_case__ ): model_names.append(snake_case__ ) else: model_names.extend(list(snake_case__ ) ) # Add pipeline tag and auto model class for those models table.update({model_name: (pipeline_tag, cls) for model_name in model_names} ) return table def __lowerCAmelCase ( snake_case__ , snake_case__ ): __UpperCamelCase : Any = get_frameworks_table() __UpperCamelCase : Union[str, Any] = Dataset.from_pandas(snake_case__ ) __UpperCamelCase : Optional[int] = hf_hub_download( "huggingface/transformers-metadata" , "pipeline_tags.json" , repo_type="dataset" , token=snake_case__ ) __UpperCamelCase : Any = Dataset.from_json(snake_case__ ) __UpperCamelCase : int = { tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"]) for i in range(len(snake_case__ ) ) } __UpperCamelCase : Any = update_pipeline_and_auto_class_table(snake_case__ ) # Sort the model classes to avoid some nondeterministic updates to create false update commits. 
__UpperCamelCase : List[Any] = sorted(table.keys() ) __UpperCamelCase : Dict = pd.DataFrame( { "model_class": model_classes, "pipeline_tag": [table[m][0] for m in model_classes], "auto_class": [table[m][1] for m in model_classes], } ) __UpperCamelCase : Optional[int] = Dataset.from_pandas(snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dir: frameworks_dataset.to_json(os.path.join(snake_case__ , "frameworks.json" ) ) tags_dataset.to_json(os.path.join(snake_case__ , "pipeline_tags.json" ) ) if commit_sha is not None: __UpperCamelCase : int = ( F"Update with commit {commit_sha}\n\nSee: " F"https://github.com/huggingface/transformers/commit/{commit_sha}" ) else: __UpperCamelCase : Dict = "Update" upload_folder( repo_id="huggingface/transformers-metadata" , folder_path=snake_case__ , repo_type="dataset" , token=snake_case__ , commit_message=snake_case__ , ) def __lowerCAmelCase ( ): __UpperCamelCase : Optional[int] = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS} __UpperCamelCase : List[Any] = transformers_module.pipelines.SUPPORTED_TASKS __UpperCamelCase : List[Any] = [] for key in pipeline_tasks: if key not in in_table: __UpperCamelCase : Optional[Any] = pipeline_tasks[key]["pt"] if isinstance(snake_case__ , (list, tuple) ): __UpperCamelCase : List[str] = model[0] __UpperCamelCase : Union[str, Any] = model.__name__ if model not in in_table.values(): missing.append(snake_case__ ) if len(snake_case__ ) > 0: __UpperCamelCase : Optional[Any] = ", ".join(snake_case__ ) raise ValueError( "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside " F"`utils/update_metadata.py`: {msg}. Please add them!" 
) if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() parser.add_argument('''--token''', type=str, help='''The token to use to push to the transformers-metadata dataset.''') parser.add_argument('''--commit_sha''', type=str, help='''The sha of the commit going with this update.''') parser.add_argument('''--check-only''', action='''store_true''', help='''Activate to just check all pipelines are present.''') _lowerCAmelCase = parser.parse_args() if args.check_only: check_pipeline_tags() else: update_metadata(args.token, args.commit_sha)
298
'''simple docstring'''
# NOTE(review): this whole module is machine-mangled. Every local is assigned to the
# name `__UpperCamelCase` while later statements read the originally intended names
# (`config`, `input_ids`, `model`, `result`, ...), so every method raises NameError,
# and several `def` headers repeat the parameter name `_UpperCAmelCase`, which is a
# SyntaxError (duplicate argument). The code is documented as-is; it must be
# un-mangled against the upstream ConvBERT TF test file before it can run.
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin

if is_tf_available():
    import tensorflow as tf
    from transformers import (
        TFConvBertForMaskedLM,
        TFConvBertForMultipleChoice,
        TFConvBertForQuestionAnswering,
        TFConvBertForSequenceClassification,
        TFConvBertForTokenClassification,
        TFConvBertModel,
    )


class A :
    '''Helper that builds tiny ConvBERT configs/inputs for the model tests (mangled name; presumably TFConvBertModelTester — TODO confirm).'''

    # NOTE(review): duplicate `_UpperCAmelCase` parameters below are a SyntaxError as written.
    def __init__(self , _UpperCAmelCase , _UpperCAmelCase=1_3 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=9_9 , _UpperCAmelCase=3_2 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=3_7 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=5_1_2 , _UpperCAmelCase=1_6 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ) -> Dict:
        # NOTE(review): every assignment below targets the same local instead of
        # `self.<attr>`; no instance state is actually set here.
        __UpperCamelCase : Optional[Any] = parent
        __UpperCamelCase : List[str] = 1_3
        __UpperCamelCase : List[Any] = 7
        __UpperCamelCase : List[str] = True
        __UpperCamelCase : Optional[Any] = True
        __UpperCamelCase : Tuple = True
        __UpperCamelCase : str = True
        __UpperCamelCase : List[Any] = 9_9
        __UpperCamelCase : Union[str, Any] = 3_8_4
        __UpperCamelCase : str = 2
        __UpperCamelCase : Optional[Any] = 4
        __UpperCamelCase : Any = 3_7
        __UpperCamelCase : str = "gelu"
        __UpperCamelCase : Optional[Any] = 0.1
        __UpperCamelCase : str = 0.1
        __UpperCamelCase : str = 5_1_2
        __UpperCamelCase : Optional[Any] = 1_6
        __UpperCamelCase : Dict = 2
        __UpperCamelCase : Optional[int] = 0.02
        __UpperCamelCase : List[Any] = 3
        __UpperCamelCase : Optional[Any] = 4
        __UpperCamelCase : int = 1_2_8
        __UpperCamelCase : Tuple = 2
        __UpperCamelCase : str = 9
        __UpperCamelCase : List[Any] = 1
        __UpperCamelCase : Any = None

    def a_ (self ) -> int:
        # Builds a tiny ConvBertConfig plus random input tensors/labels for the tests.
        __UpperCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __UpperCamelCase : str = None
        if self.use_input_mask:
            __UpperCamelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
        __UpperCamelCase : int = None
        if self.use_token_type_ids:
            __UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        __UpperCamelCase : List[Any] = None
        __UpperCamelCase : Union[str, Any] = None
        __UpperCamelCase : Optional[Any] = None
        if self.use_labels:
            __UpperCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __UpperCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __UpperCamelCase : Tuple = ids_tensor([self.batch_size] , self.num_choices )
        __UpperCamelCase : str = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    # Checks the base model's last_hidden_state shape for dict, keyword and list inputs.
    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Dict:
        __UpperCamelCase : Tuple = TFConvBertModel(config=_UpperCAmelCase )
        __UpperCamelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        __UpperCamelCase : Optional[Any] = [input_ids, input_mask]
        __UpperCamelCase : str = model(_UpperCAmelCase )
        __UpperCamelCase : int = model(_UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    # Checks the masked-LM head output shape (batch, seq, vocab).
    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
        __UpperCamelCase : int = TFConvBertForMaskedLM(config=_UpperCAmelCase )
        __UpperCamelCase : Dict = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        __UpperCamelCase : List[str] = model(_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    # Checks the sequence-classification head output shape (batch, num_labels).
    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
        __UpperCamelCase : Union[str, Any] = self.num_labels
        __UpperCamelCase : Optional[Any] = TFConvBertForSequenceClassification(config=_UpperCAmelCase )
        __UpperCamelCase : List[str] = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        __UpperCamelCase : Optional[Any] = model(_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    # Checks the multiple-choice head: inputs are tiled to (batch, num_choices, seq).
    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
        __UpperCamelCase : Optional[int] = self.num_choices
        __UpperCamelCase : List[Any] = TFConvBertForMultipleChoice(config=_UpperCAmelCase )
        __UpperCamelCase : Optional[int] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        __UpperCamelCase : Optional[Any] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        __UpperCamelCase : str = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        __UpperCamelCase : List[str] = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        __UpperCamelCase : int = model(_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    # Checks the token-classification head output shape (batch, seq, num_labels).
    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any:
        __UpperCamelCase : List[str] = self.num_labels
        __UpperCamelCase : Tuple = TFConvBertForTokenClassification(config=_UpperCAmelCase )
        __UpperCamelCase : Dict = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        __UpperCamelCase : Union[str, Any] = model(_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    # Checks the QA head's start/end logits shapes (batch, seq).
    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
        __UpperCamelCase : int = TFConvBertForQuestionAnswering(config=_UpperCAmelCase )
        __UpperCamelCase : Dict = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        __UpperCamelCase : Any = model(_UpperCAmelCase )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def a_ (self ) -> str:
        # Repackages prepare_config_and_inputs() into the (config, inputs_dict) shape
        # the common test mixin expects.
        __UpperCamelCase : str = self.prepare_config_and_inputs()
        (
            (
                __UpperCamelCase
            ) , (
                __UpperCamelCase
            ) , (
                __UpperCamelCase
            ) , (
                __UpperCamelCase
            ) , (
                __UpperCamelCase
            ) , (
                __UpperCamelCase
            ) , (
                __UpperCamelCase
            ) ,
        ) : Any = config_and_inputs
        __UpperCamelCase : int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    '''Common + pipeline test suite for the TF ConvBERT models (mangled bases; presumably TFModelTesterMixin and PipelineTesterMixin — TODO confirm).'''

    # NOTE(review): all class attributes are mangled to `A`, so each assignment
    # overwrites the previous one; upstream these are distinct attributes
    # (all_model_classes, pipeline_model_mapping, test flags).
    A = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    A = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    A = False
    A = False
    A = False

    def a_ (self ) -> Optional[int]:
        # setUp: instantiate the model tester and a config tester.
        # NOTE(review): `TFConvBertModelTester` is undefined here (the class above was
        # renamed `A` by the mangling) and `_UpperCAmelCase` is an unbound name.
        __UpperCamelCase : Tuple = TFConvBertModelTester(self )
        __UpperCamelCase : Optional[Any] = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=3_7 )

    def a_ (self ) -> Dict:
        self.config_tester.run_common_tests()

    def a_ (self ) -> Dict:
        __UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_UpperCAmelCase )

    def a_ (self ) -> Tuple:
        __UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )

    def a_ (self ) -> Tuple:
        __UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )

    def a_ (self ) -> Dict:
        __UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )

    def a_ (self ) -> Dict:
        __UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )

    def a_ (self ) -> Optional[int]:
        __UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )

    @slow
    def a_ (self ) -> Any:
        # Saves each model as a TF SavedModel and checks hidden-state/attention shapes
        # after reloading with tf.keras.models.load_model.
        __UpperCamelCase , __UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
        __UpperCamelCase : str = True
        __UpperCamelCase : int = True
        if hasattr(_UpperCAmelCase , "use_cache" ):
            __UpperCamelCase : List[Any] = True
        __UpperCamelCase : List[str] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        __UpperCamelCase : Optional[Any] = getattr(self.model_tester , "key_length" , _UpperCAmelCase )
        for model_class in self.all_model_classes:
            __UpperCamelCase : Any = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
            __UpperCamelCase : int = model_class(_UpperCAmelCase )
            __UpperCamelCase : Any = len(model(_UpperCAmelCase ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase )
                __UpperCamelCase : List[str] = os.path.join(_UpperCAmelCase , "saved_model" , "1" )
                __UpperCamelCase : List[str] = tf.keras.models.load_model(_UpperCAmelCase )
                __UpperCamelCase : Dict = model(_UpperCAmelCase )
                if self.is_encoder_decoder:
                    __UpperCamelCase : Any = outputs["encoder_hidden_states"]
                    __UpperCamelCase : Tuple = outputs["encoder_attentions"]
                else:
                    __UpperCamelCase : Tuple = outputs["hidden_states"]
                    __UpperCamelCase : Optional[int] = outputs["attentions"]
                self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
                __UpperCamelCase : Any = getattr(
                    self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

    @slow
    def a_ (self ) -> Optional[Any]:
        # Smoke test: the pretrained checkpoint loads.
        __UpperCamelCase : Tuple = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        self.assertIsNotNone(_UpperCAmelCase )

    def a_ (self ) -> Tuple:
        # Checks attention outputs: shapes, counts, and that output_attentions can be
        # toggled via both the forward kwargs and the config.
        __UpperCamelCase , __UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        __UpperCamelCase : str = True
        __UpperCamelCase : Tuple = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
        __UpperCamelCase : Optional[int] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        __UpperCamelCase : Any = getattr(self.model_tester , "key_length" , _UpperCAmelCase )
        __UpperCamelCase : List[Any] = getattr(self.model_tester , "key_length" , _UpperCAmelCase )

        def check_decoder_attentions_output(_UpperCAmelCase ):
            __UpperCamelCase : Dict = len(_UpperCAmelCase )
            self.assertEqual(out_len % 2 , 0 )
            __UpperCamelCase : List[str] = outputs.decoder_attentions
            self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )

        def check_encoder_attentions_output(_UpperCAmelCase ):
            __UpperCamelCase : Any = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

        for model_class in self.all_model_classes:
            __UpperCamelCase : Any = True
            __UpperCamelCase : Dict = False
            __UpperCamelCase : str = model_class(_UpperCAmelCase )
            __UpperCamelCase : Tuple = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
            __UpperCamelCase : List[Any] = len(_UpperCAmelCase )
            self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
            check_encoder_attentions_output(_UpperCAmelCase )

            if self.is_encoder_decoder:
                __UpperCamelCase : str = model_class(_UpperCAmelCase )
                __UpperCamelCase : Dict = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
                self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
                check_decoder_attentions_output(_UpperCAmelCase )

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            __UpperCamelCase : Optional[Any] = True
            __UpperCamelCase : Tuple = model_class(_UpperCAmelCase )
            __UpperCamelCase : int = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
            self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
            check_encoder_attentions_output(_UpperCAmelCase )

            # Check attention is always last and order is fine
            __UpperCamelCase : int = True
            __UpperCamelCase : str = True
            __UpperCamelCase : Optional[Any] = model_class(_UpperCAmelCase )
            __UpperCamelCase : Optional[int] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase ) )
            self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase )
            check_encoder_attentions_output(_UpperCAmelCase )


@require_tf
class A ( unittest.TestCase ):
    '''Integration test: pinned output slice of the pretrained ConvBERT base checkpoint.'''

    @slow
    def a_ (self ) -> str:
        __UpperCamelCase : Dict = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        __UpperCamelCase : str = tf.constant([[0, 1, 2, 3, 4, 5]] )
        __UpperCamelCase : Optional[int] = model(_UpperCAmelCase )[0]
        __UpperCamelCase : Tuple = [1, 6, 7_6_8]
        self.assertEqual(output.shape , _UpperCAmelCase )
        __UpperCamelCase : Any = tf.constant(
            [
                [
                    [-0.03_475_493, -0.4_686_034, -0.30_638_832],
                    [0.22_637_248, -0.26_988_646, -0.7_423_424],
                    [0.10_324_868, -0.45_013_508, -0.58_280_784],
                ]
            ] )
        # Compare the first 3x3 slice of the hidden states to the pinned values.
        tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1E-4 )
298
1
'''simple docstring'''
# NOTE(review): this module is machine-mangled. Locals are all assigned to the name
# `__UpperCamelCase` while later statements read the intended names (`unet`, `vae`,
# `pipe`, `image`, ...), so every method raises NameError; `get_dummy_inputs` also
# repeats the parameter name `_UpperCAmelCase` (SyntaxError: duplicate argument).
# Documented as-is; must be un-mangled before it can run.
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin

enable_full_determinism()


class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    '''Fast (CPU) tests for StableDiffusionInpaintPipeline built from tiny components (mangled base-class names).'''

    # NOTE(review): all class attributes are mangled to `A`, so each assignment
    # overwrites the previous one; upstream they are pipeline_class, params,
    # batch_params, image_params and image_latents_params.
    A = StableDiffusionInpaintPipeline
    A = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    A = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    A = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    A = frozenset([] )

    def a_ (self ) -> Any:
        # Build a tiny UNet/scheduler/VAE/CLIP stack so the pipeline runs quickly on CPU.
        torch.manual_seed(0 )
        __UpperCamelCase : List[str] = UNetaDConditionModel(
            block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=_UpperCAmelCase , )
        __UpperCamelCase : Optional[Any] = PNDMScheduler(skip_prk_steps=_UpperCAmelCase )
        torch.manual_seed(0 )
        __UpperCamelCase : int = AutoencoderKL(
            block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_2_8 , )
        torch.manual_seed(0 )
        __UpperCamelCase : Tuple = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="gelu" , projection_dim=5_1_2 , )
        __UpperCamelCase : Dict = CLIPTextModel(_UpperCAmelCase )
        __UpperCamelCase : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        __UpperCamelCase : int = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase=0 ) -> int:
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        __UpperCamelCase : Optional[Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
        __UpperCamelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __UpperCamelCase : List[Any] = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert("RGB" ).resize((6_4, 6_4) )
        # Mask image is the init image shifted by 4 gray levels.
        __UpperCamelCase : Optional[int] = Image.fromarray(np.uinta(image + 4 ) ).convert("RGB" ).resize((6_4, 6_4) )
        if str(_UpperCAmelCase ).startswith("mps" ):
            # MPS does not support device-bound generators.
            __UpperCamelCase : List[Any] = torch.manual_seed(_UpperCAmelCase )
        else:
            __UpperCamelCase : Any = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
        __UpperCamelCase : Optional[Any] = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def a_ (self ) -> Dict:
        # End-to-end CPU run with pinned expected output slice.
        __UpperCamelCase : Optional[int] = "cpu"  # ensure determinism for the device-dependent torch.Generator
        __UpperCamelCase : Dict = self.get_dummy_components()
        __UpperCamelCase : Dict = StableDiffusionInpaintPipeline(**_UpperCAmelCase )
        __UpperCamelCase : Tuple = sd_pipe.to(_UpperCAmelCase )
        sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
        __UpperCamelCase : Optional[Any] = self.get_dummy_inputs(_UpperCAmelCase )
        __UpperCamelCase : Any = sd_pipe(**_UpperCAmelCase ).images
        __UpperCamelCase : int = image[0, -3:, -3:, -1]
        assert image.shape == (1, 6_4, 6_4, 3)
        __UpperCamelCase : Optional[Any] = np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def a_ (self ) -> str:
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )


@slow
@require_torch_gpu
class A ( unittest.TestCase ):
    '''Slow GPU integration tests against the stabilityai/stable-diffusion-2-inpainting checkpoint.'''

    def a_ (self ) -> int:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def a_ (self ) -> int:
        # fp32 run compared against a reference .npy output.
        __UpperCamelCase : Any = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png" )
        __UpperCamelCase : Optional[Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
        __UpperCamelCase : Optional[Any] = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy" )
        __UpperCamelCase : Optional[int] = "stabilityai/stable-diffusion-2-inpainting"
        __UpperCamelCase : Optional[int] = StableDiffusionInpaintPipeline.from_pretrained(_UpperCAmelCase , safety_checker=_UpperCAmelCase )
        pipe.to(_UpperCAmelCase )
        pipe.set_progress_bar_config(disable=_UpperCAmelCase )
        pipe.enable_attention_slicing()
        __UpperCamelCase : List[Any] = "Face of a yellow cat, high resolution, sitting on a park bench"
        __UpperCamelCase : str = torch.manual_seed(0 )
        __UpperCamelCase : Dict = pipe(
            prompt=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , generator=_UpperCAmelCase , output_type="np" , )
        __UpperCamelCase : Any = output.images[0]
        assert image.shape == (5_1_2, 5_1_2, 3)
        assert np.abs(expected_image - image ).max() < 9E-3

    def a_ (self ) -> str:
        # Same run in fp16 with a looser tolerance.
        __UpperCamelCase : int = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png" )
        __UpperCamelCase : Optional[Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
        __UpperCamelCase : Dict = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy" )
        __UpperCamelCase : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting"
        __UpperCamelCase : Union[str, Any] = StableDiffusionInpaintPipeline.from_pretrained(
            _UpperCAmelCase , torch_dtype=torch.floataa , safety_checker=_UpperCAmelCase , )
        pipe.to(_UpperCAmelCase )
        pipe.set_progress_bar_config(disable=_UpperCAmelCase )
        pipe.enable_attention_slicing()
        __UpperCamelCase : List[Any] = "Face of a yellow cat, high resolution, sitting on a park bench"
        __UpperCamelCase : Optional[Any] = torch.manual_seed(0 )
        __UpperCamelCase : str = pipe(
            prompt=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , generator=_UpperCAmelCase , output_type="np" , )
        __UpperCamelCase : str = output.images[0]
        assert image.shape == (5_1_2, 5_1_2, 3)
        assert np.abs(expected_image - image ).max() < 5E-1

    def a_ (self ) -> List[Any]:
        # Checks peak VRAM with attention slicing + sequential CPU offload enabled.
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        __UpperCamelCase : Any = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png" )
        __UpperCamelCase : int = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
        __UpperCamelCase : List[Any] = "stabilityai/stable-diffusion-2-inpainting"
        __UpperCamelCase : List[Any] = PNDMScheduler.from_pretrained(_UpperCAmelCase , subfolder="scheduler" )
        __UpperCamelCase : Union[str, Any] = StableDiffusionInpaintPipeline.from_pretrained(
            _UpperCAmelCase , safety_checker=_UpperCAmelCase , scheduler=_UpperCAmelCase , torch_dtype=torch.floataa , )
        pipe.to(_UpperCAmelCase )
        pipe.set_progress_bar_config(disable=_UpperCAmelCase )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        __UpperCamelCase : Optional[Any] = "Face of a yellow cat, high resolution, sitting on a park bench"
        __UpperCamelCase : Tuple = torch.manual_seed(0 )
        __UpperCamelCase : Optional[Any] = pipe(
            prompt=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type="np" , )
        __UpperCamelCase : Dict = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 1_0**9
298
'''simple docstring'''
# NOTE(review): this module is machine-mangled. Locals are assigned to
# `__UpperCamelCase`/`snake_case__` names while later statements read the
# intended names (`has_not_submodules`, `from_model`, `our_model`, `name`, ...),
# so the functions raise NameError, and `def __lowerCAmelCase(snake_case__, ...)`
# headers repeat the parameter name (SyntaxError: duplicate argument).
# Documented as-is; must be un-mangled before it can run.
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging

logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger()


@dataclass
class A :
    '''Records leaf modules (presumably the original `Tracker`: hooks every submodule and keeps those actually executed — TODO confirm).'''

    # NOTE(review): field names are mangled to `A`, so later assignments overwrite
    # earlier ones; upstream these are module, traced and handles.
    A = 42
    A = field(default_factory=SCREAMING_SNAKE_CASE__ )
    A = field(default_factory=SCREAMING_SNAKE_CASE__ )

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
        # Forward hook: record modules that have no submodules (true leaves) or are
        # Conv/BatchNorm layers.
        __UpperCamelCase : str = len(list(m.modules() ) ) == 1 or isinstance(_UpperCAmelCase , nn.Convad ) or isinstance(_UpperCAmelCase , nn.BatchNormad )
        if has_not_submodules:
            self.traced.append(_UpperCAmelCase )

    def __call__(self , _UpperCAmelCase ) -> Optional[int]:
        # Register the hook on every submodule, run a forward pass, then detach hooks.
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(_UpperCAmelCase )
        [x.remove() for x in self.handles]
        return self

    @property
    def a_ (self ) -> Tuple:
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda _UpperCAmelCase : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )


@dataclass
class A :
    '''Transfers weights between two modules by pairing their traced parametrized leaves in execution order (presumably `ModuleTransfer` — TODO confirm).'''

    A = 42
    A = 42
    A = 0
    A = field(default_factory=SCREAMING_SNAKE_CASE__ )
    A = field(default_factory=SCREAMING_SNAKE_CASE__ )

    def __call__(self , _UpperCAmelCase ) -> Any:
        # Trace both modules with the same input, filter skipped layer types, then
        # copy state_dicts pairwise; raises if the operation counts differ.
        __UpperCamelCase : List[str] = Tracker(self.dest )(_UpperCAmelCase ).parametrized
        __UpperCamelCase : List[Any] = Tracker(self.src )(_UpperCAmelCase ).parametrized
        __UpperCamelCase : Optional[int] = list(filter(lambda _UpperCAmelCase : type(_UpperCAmelCase ) not in self.src_skip , _UpperCAmelCase ) )
        __UpperCamelCase : List[Any] = list(filter(lambda _UpperCAmelCase : type(_UpperCAmelCase ) not in self.dest_skip , _UpperCAmelCase ) )
        if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(_UpperCAmelCase )} operations while"
                f" destination module has {len(_UpperCAmelCase )}." )
        for dest_m, src_m in zip(_UpperCAmelCase , _UpperCAmelCase ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}" )


def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = True ):
    # Converts one timm ResNet checkpoint into a ResNetForImageClassification,
    # validates the logits match, and optionally pushes model + processor to the Hub.
    # NOTE(review): duplicate `snake_case__` parameters make this a SyntaxError as written.
    print(F"Converting {name}..." )
    with torch.no_grad():
        __UpperCamelCase : int = timm.create_model(snake_case__ , pretrained=snake_case__ ).eval()
        __UpperCamelCase : Union[str, Any] = ResNetForImageClassification(snake_case__ ).eval()
        __UpperCamelCase : Tuple = ModuleTransfer(src=snake_case__ , dest=snake_case__ )
        __UpperCamelCase : List[Any] = torch.randn((1, 3, 224, 224) )
        module_transfer(snake_case__ )
    assert torch.allclose(from_model(snake_case__ ) , our_model(snake_case__ ).logits ), "The model logits don't match the original one."
    __UpperCamelCase : Any = F"resnet{'-'.join(name.split('resnet' ) )}"
    print(snake_case__ )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message="Add model" , use_temp_dir=snake_case__ , )
        # we can use the convnext one
        __UpperCamelCase : Union[str, Any] = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message="Add image processor" , use_temp_dir=snake_case__ , )
        print(F"Pushed {checkpoint_name}" )


def __lowerCAmelCase ( snake_case__ , snake_case__ = None , snake_case__ = True ):
    # Builds ImageNet-labelled ResNet configs and (continues past this chunk)
    # converts every known size. NOTE(review): duplicate parameters again.
    __UpperCamelCase : str = "imagenet-1k-id2label.json"
    __UpperCamelCase : Any = 1_000
    __UpperCamelCase : List[str] = (1, num_labels)
    __UpperCamelCase : List[str] = "huggingface/label-files"
    __UpperCamelCase : str = num_labels
    __UpperCamelCase : str = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="dataset" ) , "r" ) )
    __UpperCamelCase : List[str] = {int(snake_case__ ): v for k, v in idalabel.items()}
    __UpperCamelCase : Any = idalabel
    __UpperCamelCase : Optional[int] = {v: k for k, v in idalabel.items()}
    __UpperCamelCase : Tuple = partial(snake_case__ , num_labels=snake_case__ , idalabel=snake_case__ , labelaid=snake_case__ )
    __UpperCamelCase : Dict = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
        "resnet152": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ), } if model_name: convert_weight_and_push(snake_case__ , names_to_config[model_name] , snake_case__ , snake_case__ ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) return config, expected_shape if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default=None, type=str, help=( '''The name of the model you wish to convert, it must be one of the supported resnet* architecture,''' ''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.''' ), ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=Path, required=True, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', default=True, type=bool, required=False, help='''If True, push model and image processor to the hub.''', ) _lowerCAmelCase = parser.parse_args() _lowerCAmelCase = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
298
1
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL _lowerCAmelCase = logging.get_logger(__name__) def __lowerCAmelCase ( snake_case__ , snake_case__ ): __UpperCamelCase : Dict = b.T __UpperCamelCase : Optional[Any] = np.sum(np.square(snake_case__ ) , axis=1 ) __UpperCamelCase : Tuple = np.sum(np.square(snake_case__ ) , axis=0 ) __UpperCamelCase : Optional[int] = np.matmul(snake_case__ , snake_case__ ) __UpperCamelCase : Optional[Any] = aa[:, None] - 2 * ab + ba[None, :] return d def __lowerCAmelCase ( snake_case__ , snake_case__ ): __UpperCamelCase : List[str] = x.reshape(-1 , 3 ) __UpperCamelCase : Optional[Any] = squared_euclidean_distance(snake_case__ , snake_case__ ) return np.argmin(snake_case__ , axis=1 ) class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' A = ["pixel_values"] def __init__(self , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = PILImageResampling.BILINEAR , _UpperCAmelCase = True , _UpperCAmelCase = True , **_UpperCAmelCase , ) -> None: super().__init__(**_UpperCAmelCase ) __UpperCamelCase : Tuple = size if size is not None else {"height": 2_5_6, "width": 2_5_6} __UpperCamelCase : int = get_size_dict(_UpperCAmelCase ) __UpperCamelCase : Union[str, Any] = np.array(_UpperCAmelCase ) if clusters is not None else None __UpperCamelCase : str = do_resize __UpperCamelCase : List[Any] = size __UpperCamelCase : Union[str, Any] = resample __UpperCamelCase : Dict = do_normalize __UpperCamelCase : str = do_color_quantize def a_ (self , _UpperCAmelCase , _UpperCAmelCase , 
_UpperCAmelCase = PILImageResampling.BILINEAR , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> np.ndarray: __UpperCamelCase : Any = get_size_dict(_UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}" ) return resize( _UpperCAmelCase , size=(size["height"], size["width"]) , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None , ) -> np.ndarray: __UpperCamelCase : Any = rescale(image=_UpperCAmelCase , scale=1 / 127.5 , data_format=_UpperCAmelCase ) __UpperCamelCase : str = image - 1 return image def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , **_UpperCAmelCase , ) -> PIL.Image.Image: __UpperCamelCase : List[Any] = do_resize if do_resize is not None else self.do_resize __UpperCamelCase : List[Any] = size if size is not None else self.size __UpperCamelCase : List[Any] = get_size_dict(_UpperCAmelCase ) __UpperCamelCase : str = resample if resample is not None else self.resample __UpperCamelCase : Dict = do_normalize if do_normalize is not None else self.do_normalize __UpperCamelCase : List[str] = do_color_quantize if do_color_quantize is not None else self.do_color_quantize __UpperCamelCase : Any = clusters if clusters is not None else self.clusters __UpperCamelCase : Optional[int] = np.array(_UpperCAmelCase ) __UpperCamelCase : Union[str, Any] = make_list_of_images(_UpperCAmelCase ) if not valid_images(_UpperCAmelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." 
) if do_color_quantize and clusters is None: raise ValueError("Clusters must be specified if do_color_quantize is True." ) # All transformations expect numpy arrays. __UpperCamelCase : Tuple = [to_numpy_array(_UpperCAmelCase ) for image in images] if do_resize: __UpperCamelCase : List[Any] = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images] if do_normalize: __UpperCamelCase : Dict = [self.normalize(image=_UpperCAmelCase ) for image in images] if do_color_quantize: __UpperCamelCase : Any = [to_channel_dimension_format(_UpperCAmelCase , ChannelDimension.LAST ) for image in images] # color quantize from (batch_size, height, width, 3) to (batch_size, height, width) __UpperCamelCase : Optional[Any] = np.array(_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = color_quantize(_UpperCAmelCase , _UpperCAmelCase ).reshape(images.shape[:-1] ) # flatten to (batch_size, height*width) __UpperCamelCase : Any = images.shape[0] __UpperCamelCase : Union[str, Any] = images.reshape(_UpperCAmelCase , -1 ) # We need to convert back to a list of images to keep consistent behaviour across processors. __UpperCamelCase : Any = list(_UpperCAmelCase ) else: __UpperCamelCase : Optional[int] = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images] __UpperCamelCase : Union[str, Any] = {"input_ids": images} return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
298
'''simple docstring''' import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) _lowerCAmelCase = logging.getLogger() def __lowerCAmelCase ( ): __UpperCamelCase : List[str] = argparse.ArgumentParser() parser.add_argument("-f" ) __UpperCamelCase : Any = parser.parse_args() return args.f def __lowerCAmelCase ( snake_case__ ): __UpperCamelCase : Dict = {} __UpperCamelCase : Dict = os.path.join(snake_case__ , "all_results.json" ) if os.path.exists(snake_case__ ): with open(snake_case__ , "r" ) as f: __UpperCamelCase : Any = json.load(snake_case__ ) else: raise ValueError(F"can't find {path}" ) return results def __lowerCAmelCase ( ): __UpperCamelCase : Any = torch.cuda.is_available() and torch_device == "cuda" return is_using_cuda and is_apex_available() _lowerCAmelCase = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' @classmethod def a_ (cls ) -> Union[str, Any]: # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU __UpperCamelCase : Optional[Any] = tempfile.mkdtemp() __UpperCamelCase : List[str] = os.path.join(cls.tmpdir , "default_config.yml" ) write_basic_config(save_location=cls.configPath ) __UpperCamelCase : Optional[Any] = ["accelerate", "launch", "--config_file", cls.configPath] @classmethod def a_ (cls ) -> Union[str, Any]: shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Optional[int]: __UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir() __UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path 
distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n ".split() if is_cuda_and_apex_available(): testargs.append("--fp16" ) run_command(self._launch_args + testargs ) __UpperCamelCase : Tuple = get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "glue_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Dict: __UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir() __UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n ".split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. 
return run_command(self._launch_args + testargs ) __UpperCamelCase : int = get_results(_UpperCAmelCase ) self.assertLess(result["perplexity"] , 1_0_0 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "clm_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Any: __UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir() __UpperCamelCase : Optional[Any] = f"\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n ".split() run_command(self._launch_args + testargs ) __UpperCamelCase : Optional[Any] = get_results(_UpperCAmelCase ) self.assertLess(result["perplexity"] , 4_2 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "mlm_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> int: # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu __UpperCamelCase : int = 7 if get_gpu_count() > 1 else 2 __UpperCamelCase : int = self.get_auto_remove_tmp_dir() __UpperCamelCase : str = f"\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n ".split() run_command(self._launch_args + testargs ) __UpperCamelCase : List[Any] = 
get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) self.assertLess(result["train_loss"] , 0.5 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "ner_no_trainer" ) ) ) @unittest.skip(reason="Fix me @muellerzr" ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Any: __UpperCamelCase : Tuple = self.get_auto_remove_tmp_dir() __UpperCamelCase : str = f"\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split() run_command(self._launch_args + testargs ) __UpperCamelCase : Optional[int] = get_results(_UpperCAmelCase ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. 
self.assertGreaterEqual(result["eval_f1"] , 2_8 ) self.assertGreaterEqual(result["eval_exact"] , 2_8 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "qa_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Dict: __UpperCamelCase : Tuple = self.get_auto_remove_tmp_dir() __UpperCamelCase : List[str] = f"\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n ".split() run_command(self._launch_args + testargs ) __UpperCamelCase : Tuple = get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.8 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "swag_no_trainer" ) ) ) @slow @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Union[str, Any]: __UpperCamelCase : str = self.get_auto_remove_tmp_dir() __UpperCamelCase : Dict = f"\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split() run_command(self._launch_args + testargs ) __UpperCamelCase : Dict = get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_rouge1"] , 1_0 ) self.assertGreaterEqual(result["eval_rouge2"] , 2 ) self.assertGreaterEqual(result["eval_rougeL"] 
, 7 ) self.assertGreaterEqual(result["eval_rougeLsum"] , 7 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "summarization_no_trainer" ) ) ) @slow @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Tuple: __UpperCamelCase : Optional[int] = self.get_auto_remove_tmp_dir() __UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n ".split() run_command(self._launch_args + testargs ) __UpperCamelCase : List[Any] = get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_bleu"] , 3_0 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "translation_no_trainer" ) ) ) @slow def a_ (self ) -> List[Any]: __UpperCamelCase : Tuple = logging.StreamHandler(sys.stdout ) logger.addHandler(_UpperCAmelCase ) __UpperCamelCase : Dict = self.get_auto_remove_tmp_dir() __UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n ".split() run_command(self._launch_args + testargs ) __UpperCamelCase : Optional[int] = 
get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.10 ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Tuple: __UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir() __UpperCamelCase : Optional[Any] = f"\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n ".split() if is_cuda_and_apex_available(): testargs.append("--fp16" ) run_command(self._launch_args + testargs ) __UpperCamelCase : str = get_results(_UpperCAmelCase ) # The base model scores a 25% self.assertGreaterEqual(result["eval_accuracy"] , 0.6 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "step_1" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "image_classification_no_trainer" ) ) )
298
1
'''simple docstring''' import argparse import logging import sys from unittest.mock import patch import run_glue_deebert from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow logging.basicConfig(level=logging.DEBUG) _lowerCAmelCase = logging.getLogger() def __lowerCAmelCase ( ): __UpperCamelCase : Any = argparse.ArgumentParser() parser.add_argument("-f" ) __UpperCamelCase : Dict = parser.parse_args() return args.f class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def a_ (self ) -> None: __UpperCamelCase : List[Any] = logging.StreamHandler(sys.stdout ) logger.addHandler(_UpperCAmelCase ) def a_ (self , _UpperCAmelCase ) -> str: __UpperCamelCase : Any = get_gpu_count() if n_gpu > 1: pass # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560 # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py" # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split() # cmd = [sys.executable] + distributed_args + args # execute_subprocess_async(cmd, env=self.get_env()) # XXX: test the results - need to save them first into .json file else: args.insert(0 , "run_glue_deebert.py" ) with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ): __UpperCamelCase : Tuple = run_glue_deebert.main() for value in result.values(): self.assertGreaterEqual(_UpperCAmelCase , 0.666 ) @slow @require_torch_non_multi_gpu def a_ (self ) -> Any: __UpperCamelCase : Dict = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 
0\n --overwrite_cache\n --eval_after_first_stage\n ".split() self.run_and_check(_UpperCAmelCase ) __UpperCamelCase : List[Any] = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split() self.run_and_check(_UpperCAmelCase ) __UpperCamelCase : Dict = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split() self.run_and_check(_UpperCAmelCase )
298
'''simple docstring''' from maths.prime_check import is_prime def __lowerCAmelCase ( snake_case__ ): if not isinstance(snake_case__ , snake_case__ ): __UpperCamelCase : Optional[int] = F"Input value of [number={number}] must be an integer" raise TypeError(snake_case__ ) if is_prime(snake_case__ ) and is_prime(number + 2 ): return number + 2 else: return -1 if __name__ == "__main__": import doctest doctest.testmod()
298
1
'''simple docstring''' import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _lowerCAmelCase = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) _lowerCAmelCase = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias')) rename_keys.append( (f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', 
f'decoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append( ( f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight', f'decoder.layers.{i}.encoder_attn.out_proj.weight', ) ) rename_keys.append( ( f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias', f'decoder.layers.{i}.encoder_attn.out_proj.bias', ) ) rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias')) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', 
f'decoder.layers.{i}.sa_qpos_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight')) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight')) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias') ) 
rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ('''input_proj.weight''', '''input_projection.weight'''), ('''input_proj.bias''', '''input_projection.bias'''), ('''query_embed.weight''', '''query_position_embeddings.weight'''), ('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''), ('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''), ('''class_embed.weight''', '''class_labels_classifier.weight'''), ('''class_embed.bias''', '''class_labels_classifier.bias'''), ('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''), ('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''), ('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''), ('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''), ('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''), ('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''), ('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''), ('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''), ('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''), ('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''), ('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''), 
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''), ('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''), ('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''), ('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''), ('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''), ] ) def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ): __UpperCamelCase : List[str] = state_dict.pop(snake_case__ ) __UpperCamelCase : Tuple = val def __lowerCAmelCase ( snake_case__ ): __UpperCamelCase : Union[str, Any] = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: __UpperCamelCase : Union[str, Any] = key.replace("backbone.0.body" , "backbone.conv_encoder.model" ) __UpperCamelCase : int = value else: __UpperCamelCase : Any = value return new_state_dict def __lowerCAmelCase ( snake_case__ , snake_case__=False ): __UpperCamelCase : Union[str, Any] = "" if is_panoptic: __UpperCamelCase : Optional[int] = "conditional_detr." 
# first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) __UpperCamelCase : Dict = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" ) __UpperCamelCase : List[Any] = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" ) # next, add query, keys and values (in that order) to the state dict __UpperCamelCase : Union[str, Any] = in_proj_weight[:256, :] __UpperCamelCase : List[str] = in_proj_bias[:256] __UpperCamelCase : str = in_proj_weight[256:512, :] __UpperCamelCase : int = in_proj_bias[256:512] __UpperCamelCase : int = in_proj_weight[-256:, :] __UpperCamelCase : Any = in_proj_bias[-256:] def __lowerCAmelCase ( ): __UpperCamelCase : str = "http://images.cocodataset.org/val2017/000000039769.jpg" __UpperCamelCase : str = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) return im @torch.no_grad() def __lowerCAmelCase ( snake_case__ , snake_case__ ): __UpperCamelCase : Union[str, Any] = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: __UpperCamelCase : Any = "resnet101" if "dc5" in model_name: __UpperCamelCase : Union[str, Any] = True __UpperCamelCase : List[Any] = "panoptic" in model_name if is_panoptic: __UpperCamelCase : Optional[int] = 250 else: __UpperCamelCase : str = 91 __UpperCamelCase : Optional[int] = "huggingface/label-files" __UpperCamelCase : Any = "coco-detection-id2label.json" __UpperCamelCase : Any = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="dataset" ) , "r" ) ) __UpperCamelCase : Tuple = {int(snake_case__ ): v for k, v in idalabel.items()} __UpperCamelCase : Dict = idalabel __UpperCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} # load image processor __UpperCamelCase : int = "coco_panoptic" if is_panoptic else "coco_detection" __UpperCamelCase : Union[str, Any] = 
ConditionalDetrImageProcessor(format=snake_case__ ) # prepare image __UpperCamelCase : List[Any] = prepare_img() __UpperCamelCase : str = image_processor(images=snake_case__ , return_tensors="pt" ) __UpperCamelCase : str = encoding["pixel_values"] logger.info(F"Converting model {model_name}..." ) # load original model from torch hub __UpperCamelCase : Tuple = torch.hub.load("DeppMeng/ConditionalDETR" , snake_case__ , pretrained=snake_case__ ).eval() __UpperCamelCase : Optional[int] = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: __UpperCamelCase : str = "conditional_detr." + src rename_key(snake_case__ , snake_case__ , snake_case__ ) __UpperCamelCase : Any = rename_backbone_keys(snake_case__ ) # query, key and value matrices need special treatment read_in_q_k_v(snake_case__ , is_panoptic=snake_case__ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them __UpperCamelCase : Optional[Any] = "conditional_detr.model." if is_panoptic else "model." 
for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("conditional_detr" ) and not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ) ): __UpperCamelCase : List[Any] = state_dict.pop(snake_case__ ) __UpperCamelCase : List[Any] = val elif "class_labels_classifier" in key or "bbox_predictor" in key: __UpperCamelCase : Tuple = state_dict.pop(snake_case__ ) __UpperCamelCase : Dict = val elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ): continue else: __UpperCamelCase : Optional[Any] = state_dict.pop(snake_case__ ) __UpperCamelCase : Optional[Any] = val else: if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): __UpperCamelCase : Optional[Any] = state_dict.pop(snake_case__ ) __UpperCamelCase : List[Any] = val # finally, create HuggingFace model and load state dict __UpperCamelCase : Optional[Any] = ConditionalDetrForSegmentation(snake_case__ ) if is_panoptic else ConditionalDetrForObjectDetection(snake_case__ ) model.load_state_dict(snake_case__ ) model.eval() model.push_to_hub(repo_id=snake_case__ , organization="DepuMeng" , commit_message="Add model" ) # verify our conversion __UpperCamelCase : Dict = conditional_detr(snake_case__ ) __UpperCamelCase : Any = model(snake_case__ ) assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 ) # Save model and image processor logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." 
) Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) model.save_pretrained(snake_case__ ) image_processor.save_pretrained(snake_case__ ) if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''conditional_detr_resnet50''', type=str, help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) _lowerCAmelCase = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
298
"""Compute the Hubble parameter H(z) for a flat-or-curved FLRW cosmology."""


def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Return the Hubble parameter H(z) in the same units as ``hubble_constant``.

    H(z) = H0 * sqrt(Or*(1+z)^4 + Om*(1+z)^3 + Ok*(1+z)^2 + Ol),
    where the curvature density Ok is derived from the closure relation
    Ok = 1 - (Om + Or + Ol).

    Args:
        hubble_constant: present-day Hubble constant H0 (e.g. km/s/Mpc).
        radiation_density: relative radiation density Or, in [0, 1].
        matter_density: relative matter density Om, in [0, 1].
        dark_energy: relative dark-energy density Ol, in [0, 1].
        redshift: redshift z at which to evaluate H (must be >= 0).

    Raises:
        ValueError: if any input is negative, or any relative density exceeds 1.
    """
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    # Only the three relative densities are bounded by 1; redshift is unbounded.
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    # Curvature density follows from the closure relation (sums to 1).
    curvature = 1 - (matter_density + radiation_density + dark_energy)
    e_2 = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    return hubble_constant * e_2 ** (1 / 2)


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
298
1
"""Power iteration for the dominant eigenpair of a symmetric/Hermitian matrix."""
import numpy as np


def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """Return (largest eigenvalue, corresponding unit eigenvector).

    Repeatedly multiplies ``vector`` by ``input_matrix`` and renormalizes,
    estimating the eigenvalue via the Rayleigh quotient, until the relative
    change falls below ``error_tol`` or ``max_iterations`` is reached.

    Args:
        input_matrix: square, real-symmetric or complex-Hermitian matrix.
        vector: nonzero starting vector of matching dimension.
        error_tol: relative-change convergence tolerance.
        max_iterations: hard cap on iterations.
    """
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real.
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # A complex input must be Hermitian for the Rayleigh quotient to be real.
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Convergence is declared when we exceed max_iterations or when the
    # eigenvalue estimate changes by less than error_tol between iterations.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the current vector and renormalize.
        w = np.dot(input_matrix, vector)
        vector = w / np.linalg.norm(w)
        # Rayleigh quotient (cheap because the vector is already normalized).
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence via relative change of the eigenvalue estimate.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_

    if is_complex:
        # For a Hermitian matrix the eigenvalue is real; drop the 0j part.
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration() -> None:
    """Compare power_iteration against numpy.linalg.eigh on real and complex inputs."""
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])

    # Build a Hermitian complex matrix from the real one.
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        else:
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy reference: eigh handles symmetric/Hermitian matrices and
        # returns eigenvalues in ascending order, so the last one is largest.
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        eigen_value_max = eigen_values[-1]
        eigen_vector_max = eigen_vectors[:, -1]

        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Eigenvectors are unique only up to sign; compare element-wise magnitudes.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
298
"""Keep the auto-generated model lists in the task-guide docs in sync with the library."""
import argparse
import os

from transformers.utils import direct_transformers_import

# All paths are set with the intent you should run this script from the root of the repo
# with the command: python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"


def _find_text_in_file(filename: str, start_prompt: str, end_prompt: str):
    """Locate the text between ``start_prompt`` and ``end_prompt`` in ``filename``.

    Returns a 4-tuple (extracted_text, start_index, end_index, all_lines) where the
    indices delimit the (blank-line-trimmed) region in ``all_lines``.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt, then step past it.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    # Trim leading/trailing blank lines inside the region.
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Maps each task-guide doc to the model mapping whose entries it should list.
TASK_GUIDE_TO_MODELS = {
    "asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    "audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    "language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    "image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    "masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    "multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    "object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    "question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    "semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    "sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    "summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    "translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    "document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    "monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}

# This list contains model types used in some task guides that are not in
# `CONFIG_MAPPING_NAMES` (therefore not in any `MODEL_MAPPING_NAMES` or any
# `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
    "summarization.md": ("nllb",),
    "translation.md": ("nllb",),
}


def get_model_list_for_task(task_guide: str) -> str:
    """Return the markdown list of model links for ``task_guide`` (newline-terminated)."""
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide: str, overwrite: bool = False) -> None:
    """Verify (or rewrite, with ``overwrite=True``) the model list inside ``task_guide``.

    Raises:
        ValueError: if the on-disk list is stale and ``overwrite`` is False.
    """
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
298
1
"""The CodeEval metric: estimates pass@k of generated code against unit tests."""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed

import numpy as np

import datasets

from .execute import check_correctness


_CITATION = """\
@misc{chen2021evaluating,
      title={Evaluating Large Language Models Trained on Code},
      author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
      year={2021},
      eprint={2107.03374},
      archivePrefix={arXiv},
      primaryClass={cs.LG}
}
"""

_DESCRIPTION = """\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
"""

_KWARGS_DESCRIPTION = """
Calculates how good are predictions given some references, using certain scores
Args:
    predictions: list of candidates to evaluate. Each candidates should be a list
        of strings with several code candidates to solve the problem.
    references: a list with a test for each prediction. Each test should evaluate the
        correctness of a code candidate.
    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the canidate programs (Default: 4).
    timeout:
Returns:
    pass_at_k: dict with pass rates for each k
    results: dict with granular results of each unittest
Examples:
    >>> code_eval = datasets.load_metric("code_eval")
    >>> test_cases = ["assert add(2,3)==5"]
    >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
    >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
    >>> print(pass_at_k)
    {'pass@1': 0.5, 'pass@2': 1.0}
"""

_WARNING = """
################################################################################
                                  !!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).

Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this
with:

>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"

################################################################################\
"""

_LICENSE = """The MIT License

Copyright (c) OpenAI (https://openai.com)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    """Executes generated code candidates against unit tests and reports pass@k."""

    def _info(self):
        # Declares the feature schema and metadata the Metric framework needs.
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Run every candidate against its test case and return (pass_at_k, results).

        Requires HF_ALLOW_CODE_EVAL="1" in the environment because this
        executes untrusted model-generated code.
        """
        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    # A program is one candidate completion followed by its unit test.
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            # Sort by completion_id so counts are deterministic per task.
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        # Only report pass@k for k values every task has enough samples for.
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results


def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimate pass@k of each problem and return the estimates in an array.

    Args:
        num_samples: int or per-problem iterable of sample counts n.
        num_correct: per-problem iterable of correct-sample counts c.
        k: the k in pass@k.
    """

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k) without large binomials."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        # Broadcast a single n across all problems.
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
298
"""Processor class for OWL-ViT: wraps a CLIP tokenizer and an OWL-ViT image processor."""
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class OwlViTProcessor(ProcessorMixin):
    """Combines an OwlViTImageProcessor and a CLIP tokenizer into a single processor.

    Text queries are tokenized (padded to the max number of queries in a batch),
    images / query images are preprocessed, and the results are merged into one
    `BatchEncoding`.
    """

    # ProcessorMixin wires these three class attributes to the constructor args.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            # Backward compatibility: `feature_extractor` was renamed to `image_processor`.
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        """Tokenize text queries and/or preprocess (query) images.

        Accepts a string, a list of strings, or a nested list of strings for `text`;
        nested lists are padded with " " so every sample has the same number of
        queries. Returns a `BatchEncoding` whose keys depend on which inputs were
        provided (input_ids/attention_mask, query_pixel_values, pixel_values).
        """
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                # Single sample: one string or a flat list of query strings.
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))
                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            # Concatenate the per-sample encodings along the batch axis, in
            # whatever tensor framework the caller requested.
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        """Forward to OwlViTImageProcessor.post_process; see its docstring."""
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        """Forward to OwlViTImageProcessor.post_process_object_detection; see its docstring."""
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        """Forward to OwlViTImageProcessor.post_process_image_guided_detection; see its docstring."""
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        """Forward to CLIPTokenizerFast.batch_decode; see its docstring."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to CLIPTokenizerFast.decode; see its docstring."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
298
1
"""BlenderbotSmall model configuration and ONNX export configuration.

NOTE(review): the extracted source was mangled — every ``__init__`` parameter
was renamed to the same identifier (duplicate argument names are a SyntaxError
in Python), every ``self.<attr>`` assignment was collapsed onto one local name
(so no setting was ever stored), and all methods of the ONNX config were named
``a_`` (each definition overwriting the previous one).  This rewrite restores
distinct, canonical identifiers so the module is valid, working Python again.
"""
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging


logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}


class BlenderbotSmallConfig(PretrainedConfig):
    """Configuration for a BlenderbotSmall encoder-decoder model.

    Stores hyper-parameters such as vocabulary size, hidden size (``d_model``)
    and the per-side layer/head/FFN dimensions; defaults reproduce the
    ``facebook/blenderbot_small-90M`` architecture.
    """

    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class BlenderbotSmallOnnxConfig(OnnxSeqaSeqConfigWithPast):
    """ONNX export configuration for BlenderbotSmall (default / seq2seq-lm /
    causal-lm / sequence-classification tasks)."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec of the exported model's inputs for the current task."""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                # With a cache the decoder consumes a single new token per step.
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec of the exported model's outputs, adding ``present.*``
        entries when a cache is used in the causal-lm task."""
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            # Skip the seq2seq parent on purpose: causal-lm has no cross-attention cache.
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build encoder + decoder dummy inputs (and, if enabled, zero-filled
        ``past_key_values``) for tracing the seq2seq export."""
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build decoder-only dummy inputs, extending the attention mask and
        adding zero-filled ``past_key_values`` when a cache is exported."""
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Tokenize a synthetic batch of ``unk_token`` strings with effective
        (fixed) batch/sequence dimensions."""
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Dispatch dummy-input generation according to ``self.task``."""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        """Flatten one cache tensor into *flattened_output*, choosing the
        seq2seq or decoder-only layout based on the task (mutates in place)."""
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeqaSeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
298
"""Base16 (hex) encoding and decoding per RFC 3548 / RFC 4648.

NOTE(review): in the mangled original both functions were defined under the
single name ``__lowerCAmelCase``, so the encoder was shadowed and unreachable.
Distinct names are restored; the old name is kept as an alias for the decoder,
which is what it resolved to after shadowing.
"""


def base16_encode(data: bytes) -> str:
    """Encode *data* as an uppercase Base16 string.

    >>> base16_encode(b"Hello World!")
    '48656C6C6F20576F726C6421'
    >>> base16_encode(b"")
    ''
    """
    # Each byte becomes exactly two uppercase hex digits.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase Base16 string back into bytes.

    Raises:
        ValueError: if the length is odd or any character is outside the
            uppercase Base16 alphabet ``0-9A-F``.

    >>> base16_decode("48656C6C6F20576F726C6421")
    b'Hello World!'
    """
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


# Backward-compatible alias: the surviving binding of the mangled name.
__lowerCAmelCase = base16_decode


if __name__ == "__main__":
    import doctest

    doctest.testmod()
298
1
"""TensorFlow activation functions and the ``ACT2FN`` name → callable registry.

NOTE(review): the mangled original defined every activation under the single
name ``__lowerCAmelCase`` and bound every module-level alias to
``_lowerCAmelCase``, so the names referenced by ``ACT2FN`` (``gelu``,
``gelu_fast``, ...) never existed.  Canonical names are restored.
"""
import math

import tensorflow as tf
from packaging import version


def _gelu(x):
    """Exact Gaussian Error Linear Unit: x * Phi(x) via the error function."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """Tanh approximation of GELU (the "new" variant used by GPT-2)."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    """Mish activation: x * tanh(softplus(x))."""
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    """Faster tanh-based GELU approximation with pre-folded constants."""
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)  # sqrt(2/pi)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    """Sigmoid-based GELU approximation: x * sigmoid(1.702 * x)."""
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """GELU clipped to the range [-10, 10] (for quantization-friendly models)."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: split *x* in two along *axis*, gate the first half
    with the sigmoid of the second."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):
    # TF >= 2.4 ships a native (approximate-capable) GELU kernel.
    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    """Look up an activation callable by name, raising KeyError with the list
    of valid names when the lookup fails."""
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys() )}")
298
"""Smoke tests for the Flax example training scripts.

NOTE(review): in the mangled original every test method was named ``a_``, so
each definition overwrote the previous one and only a single test survived;
the ``patch.object`` target was also mangled away.  Canonical method names and
``patch.object(sys, "argv", ...)`` are restored.
"""
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch

from transformers.testing_utils import TestCasePlus, get_gpu_count, slow


SRC_DIRS = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        "text-classification",
        "language-modeling",
        "summarization",
        "token-classification",
        "question-answering",
    ]
]
sys.path.extend(SRC_DIRS)

if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_ta_mlm_flax

logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_setup_file():
    """Return the value of the ``-f`` CLI flag (test-runner bookkeeping)."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    """Load ``{split}_results.json`` from *output_dir* or raise ValueError."""
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()
        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)

    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split()
        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)

    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split()
        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)

    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
        with patch.object(sys, "argv", testargs):
            run_ta_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)

    @slow
    def test_run_ner(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split()
        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)

    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split()
        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
1
"""Convert timm ResNet checkpoints to Hugging Face ``ResNetForImageClassification``.

NOTE(review): the mangled original named both dataclasses ``A`` and both
converter functions ``__lowerCAmelCase`` (each shadowing the previous one),
and clobbered the ``ImageNetPreTrainedConfig`` partial binding, so the script
could not run.  Distinct canonical names are restored.
"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List

import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger()


@dataclass
class Tracker:
    """Record the leaf modules (conv / batchnorm) hit during one forward pass."""

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # A "leaf" is a module with no submodules, or a conv/batchnorm layer.
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    """Copy weights from *src* to *dest* by pairing their traced leaf modules."""

    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")


def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    """Transfer one timm checkpoint into an HF model, verify logits match, and
    optionally push model + image processor to the Hub."""
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """Convert one named ResNet (or all known variants) and return the last
    config together with the expected logits shape."""
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
            " currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
298
"""Tests for the standalone TrOCR decoder (TrOCRDecoder / TrOCRForCausalLM).

NOTE(review): the mangled original declared the tester class as ``A`` while
the test class instantiates ``TrOCRStandaloneDecoderModelTester`` by name, and
all tester/test methods were named ``a_`` (each overwriting the previous).
Canonical names are restored so the module is consistent and runnable.
"""
import unittest

from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM


@require_torch
class TrOCRStandaloneDecoderModelTester:
    """Builds small TrOCR decoder configs/inputs for the common model tests."""

    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, attention_mask, lm_labels) for one batch."""
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        attention_mask,
        lm_labels,
    ):
        """Check that cached and uncached decoding produce identical outputs."""
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
298
1
"""Fast (Rust ``tokenizers``-backed) tokenizer for mBART.

Restored from obfuscated source: every class attribute was named ``A`` and
every method ``a_`` (each rebinding clobbered the previous one), the
``@src_lang.setter`` referenced a name that no longer existed, and all module
constants were rebound to a single ``_lowerCAmelCase`` name.  Identifiers were
restored per the upstream ``transformers`` source — verify against the
project's pinned version.
"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from tokenizers import processors

from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on


class MBartTokenizerFast(PreTrainedTokenizerFast):
    """mBART tokenizer backed by HuggingFace ``tokenizers``.

    Source-language code is appended as a *suffix* together with ``</s>``
    (``X </s> lang_code``); there is no prefix token.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # Without the sentencepiece model we cannot rebuild the slow tokenizer.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        """Current source language code (e.g. ``"en_XX"``)."""
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add the mBART prefix/suffix special tokens around one (or two) sequences."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """mBART does not use token type ids; return a zero list of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Tokenize for generation, forcing the target language code as first generated token."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset special tokens to source-language mode: no prefix, suffix = [eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset special tokens to target-language mode: no prefix, suffix = [eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece model into *save_directory*; raises if it is unavailable."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
298
"""Convert an X-MOD (fairseq) checkpoint into a HuggingFace Transformers checkpoint.

Restored from obfuscated source: the conversion function had three parameters
all named ``snake_case__`` (a SyntaxError) and its body referenced
``data_dir``/``classification_head``/``pytorch_dump_folder_path`` that no
longer existed; module constants were all rebound to ``_lowerCAmelCase`` so
``SAMPLE_TEXT``/``SAMPLE_LANGUAGE`` were undefined; and fairseq's ``fc1``/
``fc2`` attributes were both mangled to ``fca``.  Names restored per the
upstream transformers conversion script — verify against the pinned version.
"""
import argparse
from pathlib import Path

import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version

from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging


if version.parse(fairseq.__version__) < version.parse("0.12.2"):
    raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
    raise Exception("requires fairseq < v2")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"


def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    """Copy all weights from a fairseq X-MOD checkpoint into an Xmod* model and save it.

    Args:
        xmod_checkpoint_path: path to the fairseq ``.pt`` checkpoint; the
            sentencepiece model is expected next to it.
        pytorch_dump_folder_path: output directory (created if missing).
        classification_head: convert the ``mnli`` classification head instead
            of the masked-LM head.

    Raises:
        AssertionError: if any tensor shapes or adapter-language lists disagree.
        Exception: if the converted model's outputs do not match fairseq's.
    """
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias
        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias
        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xmod_checkpoint_to_pytorch(
        args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
298
1
"""Tests for ``transformers.convert_graph_to_onnx``.

Restored from obfuscated source: the three classes were all named ``A``
(each definition clobbered the previous one) and every test method was named
``a_``, so at most one test survived; dummy ``forward`` methods had duplicate
``_UpperCAmelCase`` parameters (a SyntaxError).  Names restored per the
upstream transformers test module.
"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory

from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
    convert,
    ensure_valid_input,
    generate_identified_filename,
    infer_shapes,
    quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow


class FuncContiguousArgs:
    """Dummy model whose forward args match the tokenizer outputs, in order."""

    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    """Dummy model with an extra arg interleaved between the model inputs."""

    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        """Export *model* to ONNX in a temp dir and return the output path; fail on any error."""
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)
            return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
298
'''simple docstring''' def __lowerCAmelCase ( snake_case__ ): return [ txt[:a] + txt[a].upper() + txt[a + 1 :] for a in range(len(snake_case__ ) ) if txt[a].isalpha() ] if __name__ == "__main__": __import__('''doctest''').testmod()
298
1
"""`transformers convert` CLI command: convert original TF checkpoints to PyTorch.

Restored from obfuscated source: ``register_subcommand`` and ``run`` were both
named ``a_`` (the second clobbered the first), ``__init__`` had five duplicate
``_UpperCAmelCase`` parameters (a SyntaxError), the import-error message
constant was unreferencable, and the t5/gpt2 module paths were mangled to
``ta``/``gpta``.  Identifiers restored per upstream ``transformers/commands/convert.py``.
"""
from argparse import ArgumentParser, Namespace

from ..utils import logging
from . import BaseTransformersCLICommand


def convert_command_factory(args: Namespace):
    """Factory used by argparse to build a ConvertCommand from parsed args."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""


class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register the `convert` subcommand and its arguments on *parser*."""
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
        """Dispatch to the per-architecture conversion script based on --model_type."""
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            # The checkpoint argument doubles as a dataset file when it is not a TF ckpt.
            if "ckpt" in self._tf_checkpoint.lower():
                tf_checkpoint = self._tf_checkpoint
                tf_dataset_file = ""
            else:
                tf_dataset_file = self._tf_checkpoint
                tf_checkpoint = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                tf_checkpoint, self._config, self._pytorch_dump_output, tf_dataset_file
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]"
            )
298
'''simple docstring''' def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ): def count_of_possible_combinations(snake_case__ ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(snake_case__ ) def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ): def count_of_possible_combinations_with_dp_array( snake_case__ , snake_case__ ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] __UpperCamelCase : Any = sum( count_of_possible_combinations_with_dp_array(target - item , snake_case__ ) for item in array ) __UpperCamelCase : List[str] = answer return answer __UpperCamelCase : Optional[int] = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(snake_case__ , snake_case__ ) def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ): __UpperCamelCase : Optional[int] = [0] * (target + 1) __UpperCamelCase : Tuple = 1 for i in range(1 , target + 1 ): for j in range(snake_case__ ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() _lowerCAmelCase = 3 _lowerCAmelCase = 5 _lowerCAmelCase = [1, 2, 5] print(combination_sum_iv(n, array, target))
298
1
'''simple docstring''' import json import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from transformers import OneFormerImageProcessor from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput if is_vision_available(): from PIL import Image def __lowerCAmelCase ( snake_case__ , snake_case__="shi-labs/oneformer_demo" ): with open(hf_hub_download(snake_case__ , snake_case__ , repo_type="dataset" ) , "r" ) as f: __UpperCamelCase : Union[str, Any] = json.load(snake_case__ ) __UpperCamelCase : Optional[int] = {} __UpperCamelCase : Any = [] __UpperCamelCase : List[Any] = [] for key, info in class_info.items(): __UpperCamelCase : Dict = info["name"] class_names.append(info["name"] ) if info["isthing"]: thing_ids.append(int(snake_case__ ) ) __UpperCamelCase : Union[str, Any] = thing_ids __UpperCamelCase : Any = class_names return metadata class A ( unittest.TestCase ): '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=3 , _UpperCAmelCase=3_0 , _UpperCAmelCase=4_0_0 , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=1_0 , _UpperCAmelCase=False , _UpperCAmelCase=2_5_5 , _UpperCAmelCase="shi-labs/oneformer_demo" , _UpperCAmelCase="ade20k_panoptic.json" , _UpperCAmelCase=1_0 , ) -> str: __UpperCamelCase : Optional[int] = parent __UpperCamelCase : Dict = batch_size __UpperCamelCase : Dict = num_channels __UpperCamelCase : Union[str, Any] = min_resolution __UpperCamelCase : 
List[Any] = max_resolution __UpperCamelCase : str = do_resize __UpperCamelCase : Union[str, Any] = {"shortest_edge": 3_2, "longest_edge": 1_3_3_3} if size is None else size __UpperCamelCase : Union[str, Any] = do_normalize __UpperCamelCase : str = image_mean __UpperCamelCase : Optional[Any] = image_std __UpperCamelCase : Optional[int] = class_info_file __UpperCamelCase : str = prepare_metadata(_UpperCAmelCase , _UpperCAmelCase ) __UpperCamelCase : Optional[Any] = num_text __UpperCamelCase : Dict = repo_path # for the post_process_functions __UpperCamelCase : List[str] = 2 __UpperCamelCase : Tuple = 1_0 __UpperCamelCase : Optional[Any] = 1_0 __UpperCamelCase : Optional[Any] = 3 __UpperCamelCase : int = 4 __UpperCamelCase : List[Any] = num_labels __UpperCamelCase : Any = do_reduce_labels __UpperCamelCase : Optional[Any] = ignore_index def a_ (self ) -> Dict: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "num_labels": self.num_labels, "do_reduce_labels": self.do_reduce_labels, "ignore_index": self.ignore_index, "class_info_file": self.class_info_file, "metadata": self.metadata, "num_text": self.num_text, } def a_ (self , _UpperCAmelCase , _UpperCAmelCase=False ) -> str: if not batched: __UpperCamelCase : int = image_inputs[0] if isinstance(_UpperCAmelCase , Image.Image ): __UpperCamelCase , __UpperCamelCase : Any = image.size else: __UpperCamelCase , __UpperCamelCase : List[str] = image.shape[1], image.shape[2] if w < h: __UpperCamelCase : int = int(self.size["shortest_edge"] * h / w ) __UpperCamelCase : Any = self.size["shortest_edge"] elif w > h: __UpperCamelCase : str = self.size["shortest_edge"] __UpperCamelCase : Tuple = int(self.size["shortest_edge"] * w / h ) else: __UpperCamelCase : Union[str, Any] = self.size["shortest_edge"] __UpperCamelCase : int = self.size["shortest_edge"] else: __UpperCamelCase : str = [] for image in image_inputs: __UpperCamelCase , 
__UpperCamelCase : Dict = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __UpperCamelCase : Union[str, Any] = max(_UpperCAmelCase , key=lambda _UpperCAmelCase : item[0] )[0] __UpperCamelCase : List[str] = max(_UpperCAmelCase , key=lambda _UpperCAmelCase : item[1] )[1] return expected_height, expected_width def a_ (self ) -> int: return OneFormerForUniversalSegmentationOutput( # +1 for null class class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , ) @require_torch @require_vision class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ): '''simple docstring''' A = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None # only for test_image_processing_common.test_image_proc_to_json_string A = image_processing_class def a_ (self ) -> List[str]: __UpperCamelCase : Union[str, Any] = OneFormerImageProcessorTester(self ) @property def a_ (self ) -> Optional[int]: return self.image_processing_tester.prepare_image_processor_dict() def a_ (self ) -> Optional[int]: __UpperCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_UpperCAmelCase , "image_mean" ) ) self.assertTrue(hasattr(_UpperCAmelCase , "image_std" ) ) self.assertTrue(hasattr(_UpperCAmelCase , "do_normalize" ) ) self.assertTrue(hasattr(_UpperCAmelCase , "do_resize" ) ) self.assertTrue(hasattr(_UpperCAmelCase , "size" ) ) self.assertTrue(hasattr(_UpperCAmelCase , "ignore_index" ) ) self.assertTrue(hasattr(_UpperCAmelCase , "class_info_file" ) ) self.assertTrue(hasattr(_UpperCAmelCase , "num_text" ) ) self.assertTrue(hasattr(_UpperCAmelCase , "repo_path" ) ) self.assertTrue(hasattr(_UpperCAmelCase , "metadata" ) ) self.assertTrue(hasattr(_UpperCAmelCase , "do_reduce_labels" ) ) def a_ (self ) -> Any: pass def a_ (self ) -> int: # Initialize 
image_processor __UpperCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __UpperCamelCase : Tuple = prepare_image_inputs(self.image_processing_tester , equal_resolution=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , Image.Image ) # Test not batched input __UpperCamelCase : Tuple = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values __UpperCamelCase , __UpperCamelCase : int = self.image_processing_tester.get_expected_values(_UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched __UpperCamelCase , __UpperCamelCase : str = self.image_processing_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = image_processor( _UpperCAmelCase , ["semantic"] * len(_UpperCAmelCase ) , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def a_ (self ) -> Optional[int]: # Initialize image_processor __UpperCamelCase : int = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __UpperCamelCase : Optional[int] = prepare_image_inputs(self.image_processing_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , np.ndarray ) # Test not batched input __UpperCamelCase : List[str] = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values __UpperCamelCase , __UpperCamelCase : Dict = self.image_processing_tester.get_expected_values(_UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched __UpperCamelCase , __UpperCamelCase : 
Optional[int] = self.image_processing_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase ) __UpperCamelCase : List[Any] = image_processor( _UpperCAmelCase , ["semantic"] * len(_UpperCAmelCase ) , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def a_ (self ) -> List[Any]: # Initialize image_processor __UpperCamelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __UpperCamelCase : List[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , torch.Tensor ) # Test not batched input __UpperCamelCase : Dict = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values __UpperCamelCase , __UpperCamelCase : int = self.image_processing_tester.get_expected_values(_UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched __UpperCamelCase , __UpperCamelCase : Optional[int] = self.image_processing_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase ) __UpperCamelCase : Tuple = image_processor( _UpperCAmelCase , ["semantic"] * len(_UpperCAmelCase ) , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def a_ (self , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase="np" ) -> str: __UpperCamelCase : str = self.image_processing_class(**self.image_processor_dict ) # prepare image and target __UpperCamelCase : List[Any] = self.image_processing_tester.num_labels __UpperCamelCase : List[str] = None __UpperCamelCase : str = None 
__UpperCamelCase : Any = prepare_image_inputs(self.image_processing_tester , equal_resolution=_UpperCAmelCase ) if with_segmentation_maps: __UpperCamelCase : List[Any] = num_labels if is_instance_map: __UpperCamelCase : Union[str, Any] = list(range(_UpperCAmelCase ) ) * 2 __UpperCamelCase : int = dict(enumerate(_UpperCAmelCase ) ) __UpperCamelCase : int = [ np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs ] if segmentation_type == "pil": __UpperCamelCase : Dict = [Image.fromarray(_UpperCAmelCase ) for annotation in annotations] __UpperCamelCase : Any = image_processor( _UpperCAmelCase , ["semantic"] * len(_UpperCAmelCase ) , _UpperCAmelCase , return_tensors="pt" , instance_id_to_semantic_id=_UpperCAmelCase , pad_and_return_pixel_mask=_UpperCAmelCase , ) return inputs def a_ (self ) -> Optional[Any]: pass def a_ (self ) -> List[Any]: def common(_UpperCAmelCase=False , _UpperCAmelCase=None ): __UpperCamelCase : List[str] = self.comm_get_image_processor_inputs( with_segmentation_maps=_UpperCAmelCase , is_instance_map=_UpperCAmelCase , segmentation_type=_UpperCAmelCase ) __UpperCamelCase : int = inputs["mask_labels"] __UpperCamelCase : int = inputs["class_labels"] __UpperCamelCase : List[str] = inputs["pixel_values"] __UpperCamelCase : str = inputs["text_inputs"] # check the batch_size for mask_label, class_label, text_input in zip(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): self.assertEqual(mask_label.shape[0] , class_label.shape[0] ) # this ensure padding has happened self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] ) self.assertEqual(len(_UpperCAmelCase ) , self.image_processing_tester.num_text ) common() common(is_instance_map=_UpperCAmelCase ) common(is_instance_map=_UpperCAmelCase , segmentation_type="pil" ) common(is_instance_map=_UpperCAmelCase , segmentation_type="pil" ) def a_ (self ) -> Tuple: __UpperCamelCase : List[str] = np.zeros((2_0, 5_0) ) __UpperCamelCase : Union[str, Any] 
= 1 __UpperCamelCase : str = 1 __UpperCamelCase : List[Any] = 1 __UpperCamelCase : int = binary_mask_to_rle(_UpperCAmelCase ) self.assertEqual(len(_UpperCAmelCase ) , 4 ) self.assertEqual(rle[0] , 2_1 ) self.assertEqual(rle[1] , 4_5 ) def a_ (self ) -> List[Any]: __UpperCamelCase : Union[str, Any] = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , ) __UpperCamelCase : Tuple = self.image_processing_tester.get_fake_oneformer_outputs() __UpperCamelCase : str = fature_extractor.post_process_semantic_segmentation(_UpperCAmelCase ) self.assertEqual(len(_UpperCAmelCase ) , self.image_processing_tester.batch_size ) self.assertEqual( segmentation[0].shape , ( self.image_processing_tester.height, self.image_processing_tester.width, ) , ) __UpperCamelCase : List[Any] = [(1, 4) for i in range(self.image_processing_tester.batch_size )] __UpperCamelCase : List[Any] = fature_extractor.post_process_semantic_segmentation(_UpperCAmelCase , target_sizes=_UpperCAmelCase ) self.assertEqual(segmentation[0].shape , target_sizes[0] ) def a_ (self ) -> int: __UpperCamelCase : Dict = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , ) __UpperCamelCase : List[Any] = self.image_processing_tester.get_fake_oneformer_outputs() __UpperCamelCase : int = image_processor.post_process_instance_segmentation(_UpperCAmelCase , threshold=0 ) self.assertTrue(len(_UpperCAmelCase ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue("segmentation" in el ) self.assertTrue("segments_info" in el ) self.assertEqual(type(el["segments_info"] ) , _UpperCAmelCase ) 
self.assertEqual( el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) ) def a_ (self ) -> str: __UpperCamelCase : List[str] = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , ) __UpperCamelCase : List[Any] = self.image_processing_tester.get_fake_oneformer_outputs() __UpperCamelCase : int = image_processor.post_process_panoptic_segmentation(_UpperCAmelCase , threshold=0 ) self.assertTrue(len(_UpperCAmelCase ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue("segmentation" in el ) self.assertTrue("segments_info" in el ) self.assertEqual(type(el["segments_info"] ) , _UpperCAmelCase ) self.assertEqual( el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
298
"""Directory-specific pytest settings — run automatically by pytest before any tests."""
import sys
import warnings
from os.path import abspath, dirname, join

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    """Pytest hook: register the shared transformers command-line options.

    The hook MUST be named ``pytest_addoption`` for pytest to discover it;
    the import is local so collecting this conftest does not require the
    package until the hook actually fires.
    """
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """Pytest hook: write the per-run report files when ``--make-reports`` is set."""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
298
1
"""Dutch national flag sort: single-pass, in-place three-way partition
(Dijkstra's algorithm) of a sequence containing only the values 0, 1 and 2."""

red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence):
    """Sort ``sequence`` in place in a single pass.

    Args:
        sequence: a mutable sequence whose elements are all in ``colors``.

    Returns:
        ``[]`` for empty input, a fresh one-element list for length-1 input,
        otherwise the same (now sorted) sequence object.

    Raises:
        ValueError: if an element outside ``colors`` is encountered.
    """
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    # Invariant: [0, low) == red, [low, mid) == white, (high, end] == blue.
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[high], sequence[mid] = sequence[mid], sequence[high]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contains only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
298
"""Tests for the BridgeTower image processor (resize / rescale / normalize pipeline)."""
import unittest
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import BridgeTowerImageProcessor


class BridgeTowerImageProcessingTester(unittest.TestCase):
    """Builds processor kwargs and computes the expected post-resize image shape."""

    def __init__(
        self,
        parent,
        do_resize=True,
        size=None,
        size_divisor=32,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        do_center_crop=True,
        image_mean=None,
        image_std=None,
        do_pad=True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        # Avoid mutable default arguments: the CLIP-style mean/std are applied here.
        self.image_mean = image_mean if image_mean is not None else [0.48145466, 0.4578275, 0.40821073]
        self.image_std = image_std if image_std is not None else [0.26862954, 0.26130258, 0.27577711]
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        """Kwargs used to instantiate the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Return the (height, width) the processor is expected to produce.

        Mirrors the processor's shortest-edge resize with a 1333/800 max-size
        cap, then snaps both dimensions down to a multiple of ``size_divisor``.
        For batched input, returns the per-axis maxima over the batch.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """The processor exposes every configuration attribute it was built with."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        """PIL input: single image and batch both come out at the expected shape."""
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        """NumPy input: single image and batch both come out at the expected shape."""
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        """Torch-tensor input: single image and batch both come out at the expected shape."""
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
298
1
"""Lazy-import structure for the ELECTRA model family.

Framework-specific modules (PyTorch / TensorFlow / Flax) are only imported
when one of their public names is actually accessed, keeping
``import transformers`` cheap.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps submodule name -> public names it provides; consumed by _LazyModule below.
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static imports so type checkers see the real symbols.
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
298
"""Convert an Alexa Bort checkpoint (GluonNLP / MXNet) to a HuggingFace PyTorch model."""
import argparse
import os

import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn

from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.utils import logging


# The conversion relies on gluonnlp/mxnet internals that changed across releases.
if version.parse(nlp.__version__) != version.parse("0.8.3"):
    raise Exception("requires gluonnlp == 0.8.3")

if version.parse(mx.__version__) != version.parse("1.5.0"):
    raise Exception("requires mxnet == 1.5.0")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"


def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Load the original Bort params, copy them into a BERT-shaped HF model,
    save it to ``pytorch_dump_folder_path`` and sanity-check both models agree.

    Args:
        bort_checkpoint_path: path to the official Bort ``.params`` file.
        pytorch_dump_folder_path: output directory for the converted model.
    """
    # Hyper-parameters of the released bort_4_8_768_1024 checkpoint.
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first.
    # It's the same as RoBERTa, so RobertaTokenizer can be used later.
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab.
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()

    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()

    # Parameter mapping (Gluon -> Transformers); * denotes the layer index.
    #   encoder.layer_norm.{beta,gamma}          -> bert.embeddings.LayerNorm.{bias,weight}
    #   encoder.position_weight                  -> bert.embeddings.position_embeddings.weight
    #   word_embed.0.weight                      -> bert.embeddings.word_embeddings.weight
    #   encoder.transformer_cells.*.attention_cell.proj_{key,query,value}.{bias,weight}
    #                                            -> bert.encoder.layer.*.attention.self.{key,query,value}.{bias,weight}
    #   encoder.transformer_cells.*.proj.{bias,weight}
    #                                            -> bert.encoder.layer.*.attention.output.dense.{bias,weight}
    #   encoder.transformer_cells.*.layer_norm.{beta,gamma}
    #                                            -> bert.encoder.layer.*.attention.output.LayerNorm.{bias,weight}
    #   encoder.transformer_cells.*.ffn.ffn_1.{bias,weight}
    #                                            -> bert.encoder.layer.*.intermediate.dense.{bias,weight}
    #   encoder.transformer_cells.*.ffn.ffn_2.{bias,weight}
    #                                            -> bert.encoder.layer.*.output.dense.{bias,weight}
    #   encoder.transformer_cells.*.ffn.layer_norm.{beta,gamma}
    #                                            -> bert.encoder.layer.*.output.LayerNorm.{bias,weight}

    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )

    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )

    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both model do output the same tensors")
    else:
        print("❌ Both model do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
298
1
"""Truncate every file in a directory to its first N lines (dataset minifier)."""
from pathlib import Path


def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Copy each file from *src_dir* into *dest_dir*, keeping only its first *n* lines.

    Trailing whitespace is stripped from every kept line and the kept lines are
    re-joined with "\\n" (no trailing newline). *dest_dir* is created if missing.
    """
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        # Keep only the first n lines, without trailing whitespace.
        new_lines = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new_lines))


if __name__ == "__main__":
    # fire is a third-party CLI wrapper; imported lazily so the module stays
    # importable (e.g. for `from minify import minify`) without it installed.
    import fire

    fire.Fire(minify)
298
"""Tests for `datasets.BeamBasedBuilder` (download/prepare, sharding, nested features)."""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import datasets
import datasets.config

from .utils import require_beam


class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Minimal beam-based builder with a flat string feature."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        # apache_beam is only needed when the pipeline actually runs.
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Minimal beam-based builder with a nested (Sequence) feature."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    """Return (key, example) pairs for the flat dummy dataset."""
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    """Return (key, example) pairs for the nested dummy dataset."""
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        # Keep a handle on the real writer so the mock can delegate to it with num_shards forced to 2.
        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there.
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        # Without a beam_runner, preparing a beam dataset must fail explicitly.
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
298
1
"""Megatron-BERT model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    """Configuration for a Megatron-BERT model.

    Stores the hyper-parameters used to instantiate the model; defaults mirror
    the 345M-parameter Megatron-BERT architecture (vocab 29056, hidden 1024,
    24 layers, 16 heads).
    """

    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
298
'''simple docstring''' # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.test_utils import execute_subprocess_async def __lowerCAmelCase ( snake_case__=None ): if subparsers is not None: __UpperCamelCase : Any = subparsers.add_parser("test" ) else: __UpperCamelCase : Dict = argparse.ArgumentParser("Accelerate test command" ) parser.add_argument( "--config_file" , default=snake_case__ , help=( "The path to use to store the config file. Will default to a file named default_config.yaml in the cache " "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " "with 'huggingface'." ) , ) if subparsers is not None: parser.set_defaults(func=snake_case__ ) return parser def __lowerCAmelCase ( snake_case__ ): __UpperCamelCase : str = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] ) if args.config_file is None: __UpperCamelCase : str = script_name else: __UpperCamelCase : Tuple = F"--config_file={args.config_file} {script_name}" __UpperCamelCase : Optional[Any] = ["accelerate-launch"] + test_args.split() __UpperCamelCase : Optional[Any] = execute_subprocess_async(snake_case__ , env=os.environ.copy() ) if result.returncode == 0: print("Test is a success! 
You are ready for your distributed training!" ) def __lowerCAmelCase ( ): __UpperCamelCase : int = test_command_parser() __UpperCamelCase : Union[str, Any] = parser.parse_args() test_command(snake_case__ ) if __name__ == "__main__": main()
298
1
'''simple docstring''' # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.test_utils import execute_subprocess_async def __lowerCAmelCase ( snake_case__=None ): if subparsers is not None: __UpperCamelCase : Any = subparsers.add_parser("test" ) else: __UpperCamelCase : Dict = argparse.ArgumentParser("Accelerate test command" ) parser.add_argument( "--config_file" , default=snake_case__ , help=( "The path to use to store the config file. Will default to a file named default_config.yaml in the cache " "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " "with 'huggingface'." ) , ) if subparsers is not None: parser.set_defaults(func=snake_case__ ) return parser def __lowerCAmelCase ( snake_case__ ): __UpperCamelCase : str = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] ) if args.config_file is None: __UpperCamelCase : str = script_name else: __UpperCamelCase : Tuple = F"--config_file={args.config_file} {script_name}" __UpperCamelCase : Optional[Any] = ["accelerate-launch"] + test_args.split() __UpperCamelCase : Optional[Any] = execute_subprocess_async(snake_case__ , env=os.environ.copy() ) if result.returncode == 0: print("Test is a success! 
You are ready for your distributed training!" ) def __lowerCAmelCase ( ): __UpperCamelCase : int = test_command_parser() __UpperCamelCase : Union[str, Any] = parser.parse_args() test_command(snake_case__ ) if __name__ == "__main__": main()
298
"""Tests for the BlenderbotSmall tokenizer."""
import json
import os
import unittest

from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
    VOCAB_FILES_NAMES,
    BlenderbotSmallTokenizer,
)

from ...test_tokenization_common import TokenizerTesterMixin


class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Tiny vocab/merges written to a temp dir so the tokenizer can be
        # loaded with from_pretrained during the common tests.
        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        # NOTE(review): the original obfuscated source does not show the actual
        # values of padding/truncation/skip_special_tokens here — restored to
        # the upstream test's flags; verify against the upstream test file.
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        # The trailing "." must tokenize identically in isolation and in context.
        assert encoded[-1] == encoded_dot[0]
298
1
"""Tests for the diffusers PNDMScheduler (PRK + PLMS steps)."""
import tempfile

import torch

from diffusers import PNDMScheduler

from .test_schedulers import SchedulerCommonTest


class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        """Default PNDM config, overridable per test via kwargs."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """Save/reload the scheduler and check step_prk/step_plms outputs are identical."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # Covered by check_over_configs / check_over_forward instead.
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """Same save/reload round-trip as check_over_configs, with the default config."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        """Run a complete PRK + PLMS denoising loop and return the final sample."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        # step_plms needs past residuals from step_prk; calling it first must raise.
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
298
'''simple docstring''' from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig _lowerCAmelCase = logging.get_logger(__name__) # General docstring _lowerCAmelCase = '''RegNetConfig''' # Base docstring _lowerCAmelCase = '''facebook/regnet-y-040''' _lowerCAmelCase = [1, 1088, 7, 7] # Image classification docstring _lowerCAmelCase = '''facebook/regnet-y-040''' _lowerCAmelCase = '''tabby, tabby cat''' _lowerCAmelCase = [ '''facebook/regnet-y-040''', # See all regnet models at https://huggingface.co/models?filter=regnet ] class A ( tf.keras.layers.Layer ): '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase = 3 , _UpperCAmelCase = 1 , _UpperCAmelCase = 1 , _UpperCAmelCase = "relu" , **_UpperCAmelCase , ) -> Optional[int]: super().__init__(**_UpperCAmelCase ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb __UpperCamelCase : List[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) __UpperCamelCase : Tuple = tf.keras.layers.ConvaD( filters=_UpperCAmelCase , kernel_size=_UpperCAmelCase , strides=_UpperCAmelCase , padding="VALID" , groups=_UpperCAmelCase , use_bias=_UpperCAmelCase , name="convolution" , ) __UpperCamelCase : int = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" ) __UpperCamelCase : List[str] = ACTaFN[activation] if activation is not None else tf.identity def a_ (self 
, _UpperCAmelCase ) -> Dict:
        # Pad -> conv -> batch-norm -> activation; returns the transformed feature map.
        # NOTE(review): assignments target the mangled name `__UpperCamelCase` while later
        # reads use the intended names (e.g. `hidden_state`) — obfuscation artifact
        # preserved byte-for-byte throughout this file.
        __UpperCamelCase : str = self.convolution(self.padding(_UpperCAmelCase ) )
        __UpperCamelCase : Dict = self.normalization(_UpperCAmelCase )
        __UpperCamelCase : Dict = self.activation(_UpperCAmelCase )
        return hidden_state


class A ( tf.keras.layers.Layer ):
    """RegNet embedding stem: a single strided 3x3 conv layer (presumably
    TFRegNetEmbeddings — name obfuscated; inferred from name="embedder")."""

    def __init__(self , _UpperCAmelCase , **_UpperCAmelCase ) -> Optional[Any]:
        super().__init__(**_UpperCAmelCase )
        __UpperCamelCase : Any = config.num_channels
        __UpperCamelCase : str = TFRegNetConvLayer(
            out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )

    def a_ (self , _UpperCAmelCase ) -> Tuple:
        """Validate the channel dimension, convert NCHW -> NHWC, and embed."""
        __UpperCamelCase : Dict = shape_list(_UpperCAmelCase )[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        __UpperCamelCase : Any = tf.transpose(_UpperCAmelCase , perm=(0, 2, 3, 1) )
        __UpperCamelCase : List[Any] = self.embedder(_UpperCAmelCase )
        return hidden_state


class A ( tf.keras.layers.Layer ):
    """1x1 strided conv + batch-norm used as the residual shortcut projection
    (presumably TFRegNetShortCut — name obfuscated)."""

    def __init__(self , _UpperCAmelCase , _UpperCAmelCase = 2 , **_UpperCAmelCase ) -> Any:
        super().__init__(**_UpperCAmelCase )
        __UpperCamelCase : Any = tf.keras.layers.ConvaD(
            filters=_UpperCAmelCase , kernel_size=1 , strides=_UpperCAmelCase , use_bias=_UpperCAmelCase , name="convolution" )
        __UpperCamelCase : Tuple = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase = False ) -> tf.Tensor:
        # Projection then normalization; `training` is forwarded to batch-norm.
        return self.normalization(self.convolution(_UpperCAmelCase ) , training=_UpperCAmelCase )


class A ( tf.keras.layers.Layer ):
    """Squeeze-and-Excitation attention block: global-average-pool, two 1x1
    convs (relu then sigmoid), then channel-wise rescaling of the input."""

    def __init__(self , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) -> Any:
        super().__init__(**_UpperCAmelCase )
        __UpperCamelCase : List[str] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_UpperCAmelCase , name="pooler" )
        __UpperCamelCase : Optional[Any] = [
            tf.keras.layers.ConvaD(filters=_UpperCAmelCase , kernel_size=1 , activation="relu" , name="attention.0" ),
            tf.keras.layers.ConvaD(filters=_UpperCAmelCase , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
        ]

    def a_ (self , _UpperCAmelCase ) -> Tuple:
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        __UpperCamelCase : List[str] = self.pooler(_UpperCAmelCase )
        for layer_module in self.attention:
            __UpperCamelCase : str = layer_module(_UpperCAmelCase )
        # Broadcast-multiply the sigmoid gate back onto the full feature map.
        __UpperCamelCase : List[Any] = hidden_state * pooled
        return hidden_state


class A ( tf.keras.layers.Layer ):
    """RegNet X residual layer: 1x1 -> grouped 3x3 -> 1x1 convs with an
    (optionally projected) shortcut (presumably TFRegNetXLayer)."""

    def __init__(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1 , **_UpperCAmelCase ) -> int:
        super().__init__(**_UpperCAmelCase )
        # A projection shortcut is needed whenever the shape changes.
        __UpperCamelCase : List[Any] = in_channels != out_channels or stride != 1
        __UpperCamelCase : List[str] = max(1 , out_channels // config.groups_width )
        __UpperCamelCase : List[Any] = (
            TFRegNetShortCut(_UpperCAmelCase , stride=_UpperCAmelCase , name="shortcut" )
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear" , name="shortcut" )
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        __UpperCamelCase : Optional[Any] = [
            TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
            TFRegNetConvLayer(
                _UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , activation=config.hidden_act , name="layer.1" ),
            TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase , name="layer.2" ),
        ]
        __UpperCamelCase : Dict = ACTaFN[config.hidden_act]

    def a_ (self , _UpperCAmelCase ) -> Union[str, Any]:
        # Standard pre-activation residual: f(x) + shortcut(x), then activation.
        __UpperCamelCase : List[Any] = hidden_state
        for layer_module in self.layers:
            __UpperCamelCase : Dict = layer_module(_UpperCAmelCase )
        __UpperCamelCase : List[Any] = self.shortcut(_UpperCAmelCase )
        hidden_state += residual
        __UpperCamelCase : Tuple = self.activation(_UpperCAmelCase )
        return hidden_state


class A ( tf.keras.layers.Layer ):
    """RegNet Y residual layer: same as the X layer plus a Squeeze-and-Excite
    block between the grouped conv and the final 1x1 conv."""

    def __init__(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1 , **_UpperCAmelCase ) -> Any:
        super().__init__(**_UpperCAmelCase )
        __UpperCamelCase : str = in_channels != out_channels or stride != 1
        __UpperCamelCase : Optional[int] = max(1 , out_channels // config.groups_width )
        __UpperCamelCase : Union[str, Any] = (
            TFRegNetShortCut(_UpperCAmelCase , stride=_UpperCAmelCase , name="shortcut" )
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear" , name="shortcut" )
        )
        __UpperCamelCase : Union[str, Any] = [
            TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
            TFRegNetConvLayer(
                _UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , activation=config.hidden_act , name="layer.1" ),
            # SE bottleneck is reduced to in_channels / 4.
            TFRegNetSELayer(_UpperCAmelCase , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
            TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase , name="layer.3" ),
        ]
        __UpperCamelCase : Union[str, Any] = ACTaFN[config.hidden_act]

    def a_ (self , _UpperCAmelCase ) -> int:
        __UpperCamelCase : str = hidden_state
        for layer_module in self.layers:
            __UpperCamelCase : Any = layer_module(_UpperCAmelCase )
        __UpperCamelCase : Optional[Any] = self.shortcut(_UpperCAmelCase )
        hidden_state += residual
        __UpperCamelCase : Union[str, Any] = self.activation(_UpperCAmelCase )
        return hidden_state


class A ( tf.keras.layers.Layer ):
    """One RegNet stage: a downsampling first layer followed by depth-1
    stride-1 layers of the configured type (x or y)."""

    def __init__(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , **_UpperCAmelCase ) -> int:
        super().__init__(**_UpperCAmelCase )
        __UpperCamelCase : List[str] = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        __UpperCamelCase : Tuple = [
            # downsampling is done in the first layer with stride of 2
            layer(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , name="layers.0" ),
            *[layer(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , name=f"layers.{i+1}" ) for i in range(depth - 1 )],
        ]

    def a_ (self , _UpperCAmelCase ) -> Any:
        for layer_module in self.layers:
            __UpperCamelCase : Dict = layer_module(_UpperCAmelCase )
        return hidden_state


class A ( tf.keras.layers.Layer ):
    """RegNet encoder: sequence of stages; optionally collects per-stage
    hidden states and returns a tuple or TFBaseModelOutputWithNoAttention."""

    def __init__(self , _UpperCAmelCase , **_UpperCAmelCase ) -> str:
        super().__init__(**_UpperCAmelCase )
        __UpperCamelCase : Dict = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                _UpperCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
        # Remaining stages chain consecutive hidden sizes.
        __UpperCamelCase : Union[str, Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for i, ((in_channels, out_channels), depth) in enumerate(zip(_UpperCAmelCase , config.depths[1:] ) ):
            self.stages.append(TFRegNetStage(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , depth=_UpperCAmelCase , name=f"stages.{i+1}" ) )

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase = False , _UpperCAmelCase = True ) -> TFBaseModelOutputWithNoAttention:
        # hidden_states accumulates the input of every stage plus the final output.
        __UpperCamelCase : List[Any] = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                __UpperCamelCase : Any = hidden_states + (hidden_state,)
            __UpperCamelCase : Any = stage_module(_UpperCAmelCase )
        if output_hidden_states:
            __UpperCamelCase : List[Any] = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return TFBaseModelOutputWithNoAttention(last_hidden_state=_UpperCAmelCase , hidden_states=_UpperCAmelCase )


@keras_serializable
class A ( tf.keras.layers.Layer ):
    """Core RegNet layer: embedder -> encoder -> global average pool.
    Outputs are transposed back to NCHW for cross-module uniformity."""

    # Config class used by `keras_serializable` for (de)serialization.
    A = RegNetConfig

    def __init__(self , _UpperCAmelCase , **_UpperCAmelCase ) -> List[Any]:
        super().__init__(**_UpperCAmelCase )
        __UpperCamelCase : Optional[int] = config
        __UpperCamelCase : List[Any] = TFRegNetEmbeddings(_UpperCAmelCase , name="embedder" )
        __UpperCamelCase : Union[str, Any] = TFRegNetEncoder(_UpperCAmelCase , name="encoder" )
        __UpperCamelCase : Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_UpperCAmelCase , name="pooler" )

    @unpack_inputs
    def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        __UpperCamelCase : Optional[int] = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        __UpperCamelCase : Dict = return_dict if return_dict is not None else self.config.use_return_dict
        __UpperCamelCase : Union[str, Any] = self.embedder(_UpperCAmelCase , training=_UpperCAmelCase )
        __UpperCamelCase : str = self.encoder(
            _UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase , training=_UpperCAmelCase )
        __UpperCamelCase : List[str] = encoder_outputs[0]
        __UpperCamelCase : Tuple = self.pooler(_UpperCAmelCase )
        # Change to NCHW output format have uniformity in the modules
        __UpperCamelCase : List[str] = tf.transpose(_UpperCAmelCase , perm=(0, 3, 1, 2) )
        __UpperCamelCase : List[Any] = tf.transpose(_UpperCAmelCase , perm=(0, 3, 1, 2) )
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            __UpperCamelCase : List[str] = tuple([tf.transpose(_UpperCAmelCase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=_UpperCAmelCase , pooler_output=_UpperCAmelCase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )


class A ( SCREAMING_SNAKE_CASE__ ):
    """Pretrained-model base: wires the config class, model prefix and the
    dummy serving signature (224x224 pixel values)."""

    A = RegNetConfig
    A = "regnet"
    A = "pixel_values"

    @property
    def a_ (self ) -> List[Any]:
        # Input signature used for saved-model tracing.
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )}


# NOTE(review): the exact whitespace/newlines inside these docstring constants
# were lost in the source mangling; reconstructed plausibly.
_lowerCAmelCase = R'''
    Parameters:
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
'''

_lowerCAmelCase = R'''
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConveNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top." , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ ):
    """Bare RegNet model: thin wrapper delegating to the main layer."""

    def __init__(self , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) -> Tuple:
        super().__init__(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase )
        __UpperCamelCase : Optional[Any] = TFRegNetMainLayer(_UpperCAmelCase , name="regnet" )

    @unpack_inputs
    @add_start_docstrings_to_model_forward(_UpperCAmelCase )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        __UpperCamelCase : List[str] = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        __UpperCamelCase : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
        __UpperCamelCase : Tuple = self.regnet(
            pixel_values=_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase , training=_UpperCAmelCase , )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )


@add_start_docstrings(
    "\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    " , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
    """RegNet with an image-classification head (flatten + dense) on the
    pooled features; computes the loss when `labels` is given."""

    def __init__(self , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) -> int:
        super().__init__(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase )
        __UpperCamelCase : Optional[Any] = config.num_labels
        __UpperCamelCase : Any = TFRegNetMainLayer(_UpperCAmelCase , name="regnet" )
        # classification head
        __UpperCamelCase : List[str] = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(_UpperCAmelCase )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def a_ (self , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        __UpperCamelCase : Dict = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        __UpperCamelCase : str = return_dict if return_dict is not None else self.config.use_return_dict
        __UpperCamelCase : Dict = self.regnet(
            _UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase , training=_UpperCAmelCase )
        __UpperCamelCase : Union[str, Any] = outputs.pooler_output if return_dict else outputs[1]
        __UpperCamelCase : List[str] = self.classifier[0](_UpperCAmelCase )
        __UpperCamelCase : Optional[int] = self.classifier[1](_UpperCAmelCase )
        # Loss is only computed when labels are provided.
        __UpperCamelCase : str = None if labels is None else self.hf_compute_loss(labels=_UpperCAmelCase , logits=_UpperCAmelCase )
        if not return_dict:
            __UpperCamelCase : Union[str, Any] = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=_UpperCAmelCase , logits=_UpperCAmelCase , hidden_states=outputs.hidden_states )
298
1
"""Project Euler 37: the eleven primes that are truncatable from both left
and right (https://projecteuler.net/problem=37).

FIX: the obfuscated original bound every function to the same mangled name
(`__lowerCAmelCase`) and mangled parameters/locals, while call and read sites
used the intended names (`is_prime`, `str_num`, ...), so every call raised
NameError; `all(is_prime(snake_case__) ...)` also ignored the loop variable.
The intended names and the `is_prime(i)` check are restored here.
"""
import math


def is_prime(number: int) -> bool:
    """Trial-division primality test using the 6k +/- 1 optimization."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    """Return `n` followed by every left- and right-truncation of its digits.

    E.g. 3797 -> [3797, 797, 379, 97, 37, 7, 3].
    """
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))   # drop i leading digits
        list_nums.append(int(str_num[:-i]))  # drop i trailing digits
    return list_nums


def validate(n: int) -> bool:
    """Cheap pre-filter: for numbers longer than 3 digits, both the leading
    and trailing 3-digit chunks must themselves be prime."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    """Return the first `count` two-sided truncatable primes in ascending
    order (2, 3, 5 and 7 are excluded by the problem statement)."""
    list_truncated_primes: list[int] = []
    num = 13  # smallest possible candidate; single digits are excluded
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2  # even numbers can never be (truncatable) primes
    return list_truncated_primes


def solution() -> int:
    """Sum of the eleven two-sided truncatable primes."""
    return sum(compute_truncated_primes(11))


if __name__ == "__main__":
    print(f"{sum(compute_truncated_primes(11)) = }")
298
"""DeeBERT: BERT with early-exit "highway" classifiers after every layer.

At inference, each layer's highway head computes logits; if their entropy is
below that layer's threshold the forward pass is aborted via HighwayException.

NOTE(review): assignments throughout this file target the mangled name
`__UpperCamelCase` while later reads use the intended names (e.g. `exp_x`,
`hidden_states`) — obfuscation artifact preserved byte-for-byte.
"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
    BERT_INPUTS_DOCSTRING,
    BERT_START_DOCSTRING,
    BertEmbeddings,
    BertLayer,
    BertPooler,
    BertPreTrainedModel,
)


def __lowerCAmelCase ( snake_case__ ):
    # Row-wise entropy of unnormalized logits: log(sum e^x) - sum(x e^x)/sum(e^x).
    __UpperCamelCase : Tuple = torch.exp(snake_case__ )
    __UpperCamelCase : str = torch.sum(snake_case__ , dim=1 )  # sum of exp(x_i)
    __UpperCamelCase : int = torch.sum(x * exp_x , dim=1 )  # sum of x_i * exp(x_i)
    return torch.log(snake_case__ ) - B / A


class A ( nn.Module ):
    """DeeBERT encoder: standard BertLayer stack plus one highway (early-exit)
    head per layer, each with an entropy threshold (-1 disables exiting)."""

    def __init__(self , _UpperCAmelCase ) -> Union[str, Any]:
        super().__init__()
        __UpperCamelCase : Any = config.output_attentions
        __UpperCamelCase : Dict = config.output_hidden_states
        __UpperCamelCase : Union[str, Any] = nn.ModuleList([BertLayer(_UpperCAmelCase ) for _ in range(config.num_hidden_layers )] )
        __UpperCamelCase : Tuple = nn.ModuleList([BertHighway(_UpperCAmelCase ) for _ in range(config.num_hidden_layers )] )
        __UpperCamelCase : Optional[int] = [-1 for _ in range(config.num_hidden_layers )]

    def a_ (self , _UpperCAmelCase ) -> int:
        # Set the per-layer early-exit entropy threshold(s); a scalar is
        # broadcast to all layers, otherwise a per-layer list is expected.
        if (type(_UpperCAmelCase ) is float) or (type(_UpperCAmelCase ) is int):
            for i in range(len(self.early_exit_entropy ) ):
                __UpperCamelCase : str = x
        else:
            __UpperCamelCase : List[Any] = x

    def a_ (self , _UpperCAmelCase ) -> str:
        # Copy the main pooler's weights into every highway head's pooler.
        __UpperCamelCase : Tuple = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name] )

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ) -> List[Any]:
        # Forward through the layer stack; in eval mode an early exit is
        # signalled by raising HighwayException with the partial outputs.
        __UpperCamelCase : Optional[Any] = ()
        __UpperCamelCase : Tuple = ()
        __UpperCamelCase : Dict = ()
        for i, layer_module in enumerate(self.layer ):
            if self.output_hidden_states:
                __UpperCamelCase : Tuple = all_hidden_states + (hidden_states,)
            __UpperCamelCase : Optional[int] = layer_module(
                _UpperCAmelCase , _UpperCAmelCase , head_mask[i] , _UpperCAmelCase , _UpperCAmelCase )
            __UpperCamelCase : Tuple = layer_outputs[0]
            if self.output_attentions:
                __UpperCamelCase : Optional[Any] = all_attentions + (layer_outputs[1],)
            __UpperCamelCase : Any = (hidden_states,)
            if self.output_hidden_states:
                __UpperCamelCase : Any = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                __UpperCamelCase : int = current_outputs + (all_attentions,)
            __UpperCamelCase : Optional[int] = self.highway[i](_UpperCAmelCase )
            # logits, pooled_output
            if not self.training:
                __UpperCamelCase : Dict = highway_exit[0]
                __UpperCamelCase : Any = entropy(_UpperCAmelCase )
                __UpperCamelCase : str = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                __UpperCamelCase : Optional[Any] = all_highway_exits + (highway_exit,)
                if highway_entropy < self.early_exit_entropy[i]:
                    # Confident enough — abort the rest of the stack.
                    __UpperCamelCase : str = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(_UpperCAmelCase , i + 1 )
            else:
                __UpperCamelCase : Optional[int] = all_highway_exits + (highway_exit,)
        # Add last layer
        if self.output_hidden_states:
            __UpperCamelCase : int = all_hidden_states + (hidden_states,)
        __UpperCamelCase : Dict = (hidden_states,)
        if self.output_hidden_states:
            __UpperCamelCase : Union[str, Any] = outputs + (all_hidden_states,)
        if self.output_attentions:
            __UpperCamelCase : Optional[int] = outputs + (all_attentions,)
        __UpperCamelCase : List[Any] = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits


@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). " , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ ):
    """DeeBertModel: BertEmbeddings + DeeBertEncoder + BertPooler."""

    def __init__(self , _UpperCAmelCase ) -> Dict:
        super().__init__(_UpperCAmelCase )
        __UpperCamelCase : Union[str, Any] = config
        __UpperCamelCase : Dict = BertEmbeddings(_UpperCAmelCase )
        __UpperCamelCase : Optional[Any] = DeeBertEncoder(_UpperCAmelCase )
        __UpperCamelCase : str = BertPooler(_UpperCAmelCase )
        self.init_weights()

    def a_ (self ) -> Any:
        # Initialize every highway pooler from the main pooler.
        self.encoder.init_highway_pooler(self.pooler )

    def a_ (self ) -> Optional[int]:
        # Standard HF accessor for the input embedding matrix.
        return self.embeddings.word_embeddings

    def a_ (self , _UpperCAmelCase ) -> Dict:
        # Standard HF setter for the input embedding matrix.
        __UpperCamelCase : int = value

    def a_ (self , _UpperCAmelCase ) -> Tuple:
        # Prune attention heads: {layer_index: [head indices]}.
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(_UpperCAmelCase )

    @add_start_docstrings_to_model_forward(_UpperCAmelCase )
    def a_ (self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ) -> Union[str, Any]:
        # Mirrors BertModel.forward, with default masks/token-types created on
        # demand, then delegates to the early-exit encoder.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
        elif input_ids is not None:
            __UpperCamelCase : Tuple = input_ids.size()
        elif inputs_embeds is not None:
            __UpperCamelCase : Optional[int] = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds" )
        __UpperCamelCase : List[str] = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            __UpperCamelCase : int = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
        if encoder_attention_mask is None:
            __UpperCamelCase : Tuple = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
        if token_type_ids is None:
            __UpperCamelCase : Optional[Any] = torch.zeros(_UpperCAmelCase , dtype=torch.long , device=_UpperCAmelCase )
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        __UpperCamelCase : torch.Tensor = self.get_extended_attention_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            __UpperCamelCase : Tuple = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            __UpperCamelCase : Any = encoder_attention_mask[:, None, None, :]
        __UpperCamelCase : List[Any] = encoder_extended_attention_mask.to(
            dtype=next(self.parameters() ).dtype )  # fp16 compatibility
        __UpperCamelCase : Dict = (1.0 - encoder_extended_attention_mask) * -10_000.0
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        __UpperCamelCase : Dict = self.get_head_mask(_UpperCAmelCase , self.config.num_hidden_layers )
        __UpperCamelCase : Optional[int] = self.embeddings(
            input_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase )
        __UpperCamelCase : List[Any] = self.encoder(
            _UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )
        __UpperCamelCase : Union[str, Any] = encoder_outputs[0]
        __UpperCamelCase : Any = self.pooler(_UpperCAmelCase )
        __UpperCamelCase : Union[str, Any] = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits


class A ( SCREAMING_SNAKE_CASE__ ):
    """Exception used as control flow to abort the encoder at an early exit;
    carries the partial outputs and the (1-based) exit layer index."""

    def __init__(self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]:
        __UpperCamelCase : Tuple = message
        __UpperCamelCase : Union[str, Any] = exit_layer  # start from 1!


class A ( nn.Module ):
    """Per-layer highway head: pooler + dropout + linear classifier."""

    def __init__(self , _UpperCAmelCase ) -> Dict:
        super().__init__()
        __UpperCamelCase : Union[str, Any] = BertPooler(_UpperCAmelCase )
        __UpperCamelCase : int = nn.Dropout(config.hidden_dropout_prob )
        __UpperCamelCase : Union[str, Any] = nn.Linear(config.hidden_size , config.num_labels )

    def a_ (self , _UpperCAmelCase ) -> Any:
        # Pooler
        __UpperCamelCase : Optional[int] = encoder_outputs[0]
        __UpperCamelCase : str = self.pooler(_UpperCAmelCase )
        # "return" pooler_output
        # BertModel
        __UpperCamelCase : Tuple = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output
        # Dropout and classification
        __UpperCamelCase : Dict = bmodel_output[1]
        __UpperCamelCase : List[Any] = self.dropout(_UpperCAmelCase )
        __UpperCamelCase : Any = self.classifier(_UpperCAmelCase )
        return logits, pooled_output


@add_start_docstrings(
    "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n    also takes care of multi-layer training. " , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ ):
    """Sequence classification on top of DeeBertModel, with highway losses
    (train_highway) and optional early exit / forced exit layer at eval."""

    def __init__(self , _UpperCAmelCase ) -> Any:
        super().__init__(_UpperCAmelCase )
        __UpperCamelCase : List[Any] = config.num_labels
        __UpperCamelCase : List[Any] = config.num_hidden_layers
        __UpperCamelCase : Optional[int] = DeeBertModel(_UpperCAmelCase )
        __UpperCamelCase : List[str] = nn.Dropout(config.hidden_dropout_prob )
        __UpperCamelCase : str = nn.Linear(config.hidden_size , self.config.num_labels )
        self.init_weights()

    @add_start_docstrings_to_model_forward(_UpperCAmelCase )
    def a_ (self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=-1 , _UpperCAmelCase=False , ) -> int:
        # exit_layer defaults to the full depth; a HighwayException during the
        # BERT forward replaces the outputs with the early-exit ones.
        __UpperCamelCase : int = self.num_layers
        try:
            __UpperCamelCase : Tuple = self.bert(
                _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , head_mask=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase , )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            __UpperCamelCase : str = outputs[1]
            __UpperCamelCase : List[Any] = self.dropout(_UpperCAmelCase )
            __UpperCamelCase : Dict = self.classifier(_UpperCAmelCase )
            __UpperCamelCase : Tuple = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            __UpperCamelCase : int = e.message
            __UpperCamelCase : Optional[Any] = e.exit_layer
            __UpperCamelCase : Optional[int] = outputs[0]
        if not self.training:
            __UpperCamelCase : Optional[int] = entropy(_UpperCAmelCase )
            __UpperCamelCase : Optional[Any] = []
            __UpperCamelCase : Any = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                __UpperCamelCase : List[str] = MSELoss()
                __UpperCamelCase : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
            else:
                __UpperCamelCase : Dict = CrossEntropyLoss()
                __UpperCamelCase : Any = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            # work with highway exits
            __UpperCamelCase : List[Any] = []
            for highway_exit in outputs[-1]:
                __UpperCamelCase : Union[str, Any] = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(_UpperCAmelCase )
                    highway_entropy.append(highway_exit[2] )
                if self.num_labels == 1:
                    # We are doing regression
                    __UpperCamelCase : Union[str, Any] = MSELoss()
                    __UpperCamelCase : str = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
                else:
                    __UpperCamelCase : Optional[Any] = CrossEntropyLoss()
                    __UpperCamelCase : List[str] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
                highway_losses.append(_UpperCAmelCase )
            if train_highway:
                __UpperCamelCase : int = (sum(highway_losses[:-1] ),) + outputs
                # exclude the final highway, of course
            else:
                __UpperCamelCase : Dict = (loss,) + outputs
        if not self.training:
            __UpperCamelCase : Optional[int] = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                __UpperCamelCase : int = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
298
1
"""Project Euler 28: sum of the numbers on both diagonals of an n x n number
spiral (https://projecteuler.net/problem=28).

FIX: the obfuscated original assigned to a mangled name while the arithmetic
read `total`, `odd` and `even` (and the CLI read `n`), so it raised NameError.
The intended names are restored here.
"""
from math import ceil


def solution(n: int = 1_001) -> int:
    """Return the sum of both diagonals of an `n` x `n` number spiral.

    Ring `i` (side length 2*i + 1) contributes its four corner values, whose
    sum is 4 * (2*i + 1)**2 - 12*i — written below as 4 * odd**2 - 6 * even.
    The centre cell contributes 1.
    """
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
298
"""Shared filename and cache-path constants (diffusers-style).

NOTE(review): every constant is bound to the same mangled name
`_lowerCAmelCase` — obfuscation artifact; code preserved byte-for-byte.
The string values indicate the intended constants, noted per line.
"""
import os

from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home


_lowerCAmelCase = HUGGINGFACE_HUB_CACHE                 # default cache root (from huggingface_hub)
_lowerCAmelCase = '''config.json'''                     # model config filename
_lowerCAmelCase = '''diffusion_pytorch_model.bin'''     # PyTorch weights filename
_lowerCAmelCase = '''diffusion_flax_model.msgpack'''    # Flax weights filename
_lowerCAmelCase = '''model.onnx'''                      # ONNX export filename
_lowerCAmelCase = '''diffusion_pytorch_model.safetensors'''  # safetensors weights filename
_lowerCAmelCase = '''weights.pb'''                      # TF protobuf weights filename
_lowerCAmelCase = '''https://huggingface.co'''          # Hub base URL
_lowerCAmelCase = default_cache_path                    # resolved default cache path
_lowerCAmelCase = '''diffusers_modules'''               # dynamic-modules subfolder name
_lowerCAmelCase = os.getenv('''HF_MODULES_CACHE''', os.path.join(hf_cache_home, '''modules'''))  # dynamic-modules cache
_lowerCAmelCase = ['''fp16''', '''non-ema''']           # recognised weight-variant suffixes
_lowerCAmelCase = '''.self_attn'''                      # attention-module suffix marker
298
1
"""Convert a fairseq Wav2Vec2-Conformer checkpoint to the HF Transformers
format: map state-dict keys via MAPPING, copy tensors with shape checks, and
(when fine-tuned) build the CTC tokenizer/feature-extractor/processor.

NOTE(review): function and variable definition sites are obfuscated
(`__lowerCAmelCase`, `snake_case__`, `__UpperCamelCase`) while read sites use
the intended names — artifact preserved byte-for-byte. The final conversion
function continues past the end of this excerpt.
"""
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    WavaVecaConformerConfig,
    WavaVecaConformerForCTC,
    WavaVecaConformerForPreTraining,
    WavaVecaCTCTokenizer,
    WavaVecaFeatureExtractor,
    WavaVecaProcessor,
    logging,
)


logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)

# fairseq state-dict key fragment -> HF module path ("*" is later filled with
# the encoder layer index).
_lowerCAmelCase = {
    '''post_extract_proj''': '''feature_projection.projection''',
    '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
    '''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
    '''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
    '''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
    '''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
    '''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
    '''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
    '''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
    '''self_attn.rotary_emb''': '''encoder.embed_positions''',
    '''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
    '''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
    '''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
    '''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
    '''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
    '''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
    '''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
    '''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
    '''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
    '''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
    '''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
    '''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
    '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
    '''encoder.layer_norm''': '''encoder.layer_norm''',
    '''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
    '''quantizer.weight_proj''': '''quantizer.weight_proj''',
    '''quantizer.vars''': '''quantizer.codevectors''',
    '''project_q''': '''project_q''',
    '''final_proj''': '''project_hid''',
    '''w2v_encoder.proj''': '''lm_head''',
    '''mask_emb''': '''masked_spec_embed''',
}
# HF attributes that live at the top level of the model (no "wav2vec2_conformer." prefix).
_lowerCAmelCase = [
    '''lm_head''',
    '''quantizer.weight_proj''',
    '''quantizer.codevectors''',
    '''project_q''',
    '''project_hid''',
]


def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
    # Walk the dotted `key` to the target HF submodule/parameter, verify the
    # shape against the fairseq `value`, then copy it into the right slot.
    for attribute in key.split("." ):
        __UpperCamelCase : Tuple = getattr(snake_case__ , snake_case__ )
    if weight_type is not None:
        __UpperCamelCase : Union[str, Any] = getattr(snake_case__ , snake_case__ ).shape
    else:
        __UpperCamelCase : Any = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            F" {value.shape} for {full_name}" )
    if weight_type == "weight":
        __UpperCamelCase : Optional[int] = value
    elif weight_type == "weight_g":
        __UpperCamelCase : Dict = value
    elif weight_type == "weight_v":
        __UpperCamelCase : Tuple = value
    elif weight_type == "bias":
        __UpperCamelCase : Optional[int] = value
    elif weight_type == "running_mean":
        __UpperCamelCase : Optional[Any] = value
    elif weight_type == "running_var":
        __UpperCamelCase : Optional[int] = value
    elif weight_type == "num_batches_tracked":
        __UpperCamelCase : Optional[int] = value
    elif weight_type == "inv_freq":
        __UpperCamelCase : str = value
    else:
        __UpperCamelCase : List[Any] = value
    logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )


def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
    # Iterate the fairseq state dict; conv feature-extractor tensors go through
    # load_conv_layer, everything else is matched against MAPPING. Unmatched
    # tensors are collected and reported.
    __UpperCamelCase : Any = []
    __UpperCamelCase : Tuple = fairseq_model.state_dict()
    __UpperCamelCase : Union[str, Any] = hf_model.wavaveca_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        __UpperCamelCase : Dict = False
        if "conv_layers" in name:
            load_conv_layer(
                snake_case__ , snake_case__ , snake_case__ , snake_case__ , hf_model.config.feat_extract_norm == "group" , )
            __UpperCamelCase : Optional[Any] = True
        else:
            for key, mapped_key in MAPPING.items():
                __UpperCamelCase : List[Any] = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    __UpperCamelCase : Dict = True
                    if "*" in mapped_key:
                        # Recover the layer index from the fairseq key.
                        __UpperCamelCase : Union[str, Any] = name.split(snake_case__ )[0].split("." )[-2]
                        __UpperCamelCase : int = mapped_key.replace("*" , snake_case__ )
                    if "pos_bias_u" in name:
                        __UpperCamelCase : Dict = None
                    elif "pos_bias_v" in name:
                        __UpperCamelCase : List[Any] = None
                    elif "weight_g" in name:
                        __UpperCamelCase : Any = "weight_g"
                    elif "weight_v" in name:
                        __UpperCamelCase : Optional[Any] = "weight_v"
                    elif "bias" in name:
                        __UpperCamelCase : List[Any] = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        __UpperCamelCase : List[str] = "weight"
                    elif "running_mean" in name:
                        __UpperCamelCase : Optional[Any] = "running_mean"
                    elif "inv_freq" in name:
                        __UpperCamelCase : Union[str, Any] = "inv_freq"
                    elif "running_var" in name:
                        __UpperCamelCase : Optional[int] = "running_var"
                    elif "num_batches_tracked" in name:
                        __UpperCamelCase : List[str] = "num_batches_tracked"
                    else:
                        __UpperCamelCase : Dict = None
                    set_recursively(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
                continue
        if not is_used:
            unused_weights.append(snake_case__ )
    logger.warning(F"Unused weights: {unused_weights}" )


def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
    # Copy one conv feature-extractor tensor. type_id 0 = conv weight/bias,
    # type_id 2 = layer-norm (only layer 0 when using group norm).
    __UpperCamelCase : List[str] = full_name.split("conv_layers." )[-1]
    __UpperCamelCase : Optional[Any] = name.split("." )
    __UpperCamelCase : Tuple = int(items[0] )
    __UpperCamelCase : List[Any] = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
            __UpperCamelCase : Union[str, Any] = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
            __UpperCamelCase : List[Any] = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
            __UpperCamelCase : Optional[Any] = value
            logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
            __UpperCamelCase : List[Any] = value
            logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(snake_case__ )


@torch.no_grad()
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=True ):
    # Main conversion entry point (continues beyond this excerpt): build the
    # config, and for fine-tuned checkpoints construct the CTC vocab,
    # tokenizer, feature extractor and processor.
    if config_path is not None:
        __UpperCamelCase : Tuple = WavaVecaConformerConfig.from_pretrained(snake_case__ , hidden_act="swish" )
    else:
        __UpperCamelCase : Dict = WavaVecaConformerConfig()
    if "rope" in checkpoint_path:
        # Rotary position embeddings are selected from the checkpoint name.
        __UpperCamelCase : Dict = "rotary"
    if is_finetuned:
        if dict_path:
            __UpperCamelCase : Any = Dictionary.load(snake_case__ )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            __UpperCamelCase : int = target_dict.pad_index
            __UpperCamelCase : List[str] = target_dict.bos_index
            __UpperCamelCase : List[Any] = target_dict.eos_index
            __UpperCamelCase : Tuple = len(target_dict.symbols )
            __UpperCamelCase : int = os.path.join(snake_case__ , "vocab.json" )
            if not os.path.isdir(snake_case__ ):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(snake_case__ ) )
                return
            os.makedirs(snake_case__ , exist_ok=snake_case__ )
            __UpperCamelCase : Union[str, Any] = target_dict.indices
            # fairseq has the <pad> and <s> switched
            __UpperCamelCase : Optional[Any] = 0
            __UpperCamelCase : int = 1
            with open(snake_case__ , "w" , encoding="utf-8" ) as vocab_handle:
                json.dump(snake_case__ , snake_case__ )
            __UpperCamelCase : str = WavaVecaCTCTokenizer(
                snake_case__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=snake_case__ , )
            __UpperCamelCase : Any = True if config.feat_extract_norm == "layer" else False
            __UpperCamelCase : Tuple = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=snake_case__ , return_attention_mask=snake_case__ , )
            __UpperCamelCase : List[Any] = WavaVecaProcessor(feature_extractor=snake_case__ , tokenizer=snake_case__ )
            processor.save_pretrained(snake_case__ )
__UpperCamelCase : Any = WavaVecaConformerForCTC(snake_case__ ) else: __UpperCamelCase : List[str] = WavaVecaConformerForPreTraining(snake_case__ ) if is_finetuned: __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: __UpperCamelCase : Dict = argparse.Namespace(task="audio_pretraining" ) __UpperCamelCase : str = fairseq.tasks.setup_task(snake_case__ ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=snake_case__ ) __UpperCamelCase : Any = model[0].eval() recursively_load_weights(snake_case__ , snake_case__ , not is_finetuned ) hf_wavavec.save_pretrained(snake_case__ ) if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) _lowerCAmelCase = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
298
# NOTE(review): this span appears to be a machine-mangled copy of a TFConvBert
# test module. All three classes are named `A`, every test method is named `a_`
# (so later defs shadow earlier ones in the same class), parameter lists use
# repeated `_UpperCAmelCase` (duplicate parameter names are a SyntaxError), and
# bodies reference original names (`parent`, `input_ids`, `TFConvBertModelTester`,
# `SCREAMING_SNAKE_CASE__`) that nothing here defines. Not runnable as-is;
# comments describe the apparent intent, tokens are left untouched.
'''simple docstring'''

from __future__ import annotations

import os
import tempfile
import unittest

from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin

if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFConvBertForMaskedLM,
        TFConvBertForMultipleChoice,
        TFConvBertForQuestionAnswering,
        TFConvBertForSequenceClassification,
        TFConvBertForTokenClassification,
        TFConvBertModel,
    )


# Model-tester helper (presumably originally TFConvBertModelTester): builds a
# tiny ConvBert config plus random inputs and runs per-head output-shape checks.
class A :
    '''simple docstring'''

    # Constructor: every incoming default is ignored — each attribute is
    # immediately assigned a hard-coded value below.
    def __init__(self , _UpperCAmelCase , _UpperCAmelCase=1_3 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=9_9 , _UpperCAmelCase=3_2 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=3_7 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=5_1_2 , _UpperCAmelCase=1_6 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ) -> Dict:
        __UpperCamelCase : Optional[Any] = parent
        __UpperCamelCase : List[str] = 1_3
        __UpperCamelCase : List[Any] = 7
        __UpperCamelCase : List[str] = True
        __UpperCamelCase : Optional[Any] = True
        __UpperCamelCase : Tuple = True
        __UpperCamelCase : str = True
        __UpperCamelCase : List[Any] = 9_9
        __UpperCamelCase : Union[str, Any] = 3_8_4
        __UpperCamelCase : str = 2
        __UpperCamelCase : Optional[Any] = 4
        __UpperCamelCase : Any = 3_7
        __UpperCamelCase : str = "gelu"
        __UpperCamelCase : Optional[Any] = 0.1
        __UpperCamelCase : str = 0.1
        __UpperCamelCase : str = 5_1_2
        __UpperCamelCase : Optional[Any] = 1_6
        __UpperCamelCase : Dict = 2
        __UpperCamelCase : Optional[int] = 0.02
        __UpperCamelCase : List[Any] = 3
        __UpperCamelCase : Optional[Any] = 4
        __UpperCamelCase : int = 1_2_8
        __UpperCamelCase : Tuple = 2
        __UpperCamelCase : str = 9
        __UpperCamelCase : List[Any] = 1
        __UpperCamelCase : Any = None

    # prepare_config_and_inputs: random ids/masks/labels plus a small ConvBertConfig.
    def a_ (self ) -> int:
        __UpperCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __UpperCamelCase : str = None
        if self.use_input_mask:
            __UpperCamelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
        __UpperCamelCase : int = None
        if self.use_token_type_ids:
            __UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        __UpperCamelCase : List[Any] = None
        __UpperCamelCase : Union[str, Any] = None
        __UpperCamelCase : Optional[Any] = None
        if self.use_labels:
            __UpperCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __UpperCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __UpperCamelCase : Tuple = ids_tensor([self.batch_size] , self.num_choices )
        __UpperCamelCase : str = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    # create_and_check_model: base model called with dict and list inputs;
    # checks the last_hidden_state shape.
    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Dict:
        __UpperCamelCase : Tuple = TFConvBertModel(config=_UpperCAmelCase )
        __UpperCamelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        __UpperCamelCase : Optional[Any] = [input_ids, input_mask]
        __UpperCamelCase : str = model(_UpperCAmelCase )
        __UpperCamelCase : int = model(_UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    # masked-LM head: logits over the full vocab per position.
    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
        __UpperCamelCase : int = TFConvBertForMaskedLM(config=_UpperCAmelCase )
        __UpperCamelCase : Dict = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        __UpperCamelCase : List[str] = model(_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    # sequence-classification head: one logit vector per example.
    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
        __UpperCamelCase : Union[str, Any] = self.num_labels
        __UpperCamelCase : Optional[Any] = TFConvBertForSequenceClassification(config=_UpperCAmelCase )
        __UpperCamelCase : List[str] = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        __UpperCamelCase : Optional[Any] = model(_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    # multiple-choice head: inputs are tiled across the choice dimension.
    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
        __UpperCamelCase : Optional[int] = self.num_choices
        __UpperCamelCase : List[Any] = TFConvBertForMultipleChoice(config=_UpperCAmelCase )
        __UpperCamelCase : Optional[int] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        __UpperCamelCase : Optional[Any] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        __UpperCamelCase : str = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        __UpperCamelCase : List[str] = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        __UpperCamelCase : int = model(_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    # token-classification head: per-token label logits.
    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any:
        __UpperCamelCase : List[str] = self.num_labels
        __UpperCamelCase : Tuple = TFConvBertForTokenClassification(config=_UpperCAmelCase )
        __UpperCamelCase : Dict = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        __UpperCamelCase : Union[str, Any] = model(_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    # question-answering head: start/end logits per position.
    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
        __UpperCamelCase : int = TFConvBertForQuestionAnswering(config=_UpperCAmelCase )
        __UpperCamelCase : Dict = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        __UpperCamelCase : Any = model(_UpperCAmelCase )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    # prepare_config_and_inputs_for_common: repack inputs as (config, inputs_dict).
    # NOTE(review): a parenthesized tuple target with a `: Any` annotation is a
    # SyntaxError as written.
    def a_ (self ) -> str:
        __UpperCamelCase : str = self.prepare_config_and_inputs()
        (
            (
                __UpperCamelCase
            ) , (
                __UpperCamelCase
            ) , (
                __UpperCamelCase
            ) , (
                __UpperCamelCase
            ) , (
                __UpperCamelCase
            ) , (
                __UpperCamelCase
            ) , (
                __UpperCamelCase
            ) ,
        ) : Any = config_and_inputs
        __UpperCamelCase : int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


# Common test suite (bases were presumably TFModelTesterMixin and
# PipelineTesterMixin; `SCREAMING_SNAKE_CASE__` is undefined here).
@require_tf
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    '''simple docstring'''

    # all_model_classes / pipeline mapping / test flags — all mangled to `A`.
    A = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    A = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    A = False
    A = False
    A = False

    # setUp: build the model tester and config tester.
    def a_ (self ) -> Optional[int]:
        __UpperCamelCase : Tuple = TFConvBertModelTester(self )
        __UpperCamelCase : Optional[Any] = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=3_7 )

    def a_ (self ) -> Dict:
        self.config_tester.run_common_tests()

    def a_ (self ) -> Dict:
        __UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_UpperCAmelCase )

    def a_ (self ) -> Tuple:
        __UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )

    def a_ (self ) -> Tuple:
        __UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )

    def a_ (self ) -> Dict:
        __UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )

    def a_ (self ) -> Dict:
        __UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )

    def a_ (self ) -> Optional[int]:
        __UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )

    # SavedModel round-trip: save each model, reload via tf.keras, and verify
    # hidden-state and attention output shapes survive serialization.
    @slow
    def a_ (self ) -> Any:
        __UpperCamelCase , __UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
        __UpperCamelCase : str = True
        __UpperCamelCase : int = True
        if hasattr(_UpperCAmelCase , "use_cache" ):
            __UpperCamelCase : List[Any] = True
        __UpperCamelCase : List[str] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        __UpperCamelCase : Optional[Any] = getattr(self.model_tester , "key_length" , _UpperCAmelCase )
        for model_class in self.all_model_classes:
            __UpperCamelCase : Any = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
            __UpperCamelCase : int = model_class(_UpperCAmelCase )
            __UpperCamelCase : Any = len(model(_UpperCAmelCase ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase )
                __UpperCamelCase : List[str] = os.path.join(_UpperCAmelCase , "saved_model" , "1" )
                __UpperCamelCase : List[str] = tf.keras.models.load_model(_UpperCAmelCase )
                __UpperCamelCase : Dict = model(_UpperCAmelCase )
                if self.is_encoder_decoder:
                    __UpperCamelCase : Any = outputs["encoder_hidden_states"]
                    __UpperCamelCase : Tuple = outputs["encoder_attentions"]
                else:
                    __UpperCamelCase : Tuple = outputs["hidden_states"]
                    __UpperCamelCase : Optional[int] = outputs["attentions"]
                self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
                __UpperCamelCase : Any = getattr(
                    self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
                # ConvBert halves the attention heads via its grouped convolutions,
                # hence num_attention_heads / 2 below — TODO confirm against model code
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

    @slow
    def a_ (self ) -> Optional[Any]:
        __UpperCamelCase : Tuple = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        self.assertIsNotNone(_UpperCAmelCase )

    # attention-outputs test: toggling output_attentions / output_hidden_states
    # via inputs and via config must produce consistently shaped attentions.
    def a_ (self ) -> Tuple:
        __UpperCamelCase , __UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        __UpperCamelCase : str = True
        __UpperCamelCase : Tuple = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
        __UpperCamelCase : Optional[int] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        __UpperCamelCase : Any = getattr(self.model_tester , "key_length" , _UpperCAmelCase )
        __UpperCamelCase : List[Any] = getattr(self.model_tester , "key_length" , _UpperCAmelCase )

        def check_decoder_attentions_output(_UpperCAmelCase ):
            __UpperCamelCase : Dict = len(_UpperCAmelCase )
            self.assertEqual(out_len % 2 , 0 )
            __UpperCamelCase : List[str] = outputs.decoder_attentions
            self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )

        def check_encoder_attentions_output(_UpperCAmelCase ):
            __UpperCamelCase : Any = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

        for model_class in self.all_model_classes:
            __UpperCamelCase : Any = True
            __UpperCamelCase : Dict = False
            __UpperCamelCase : str = model_class(_UpperCAmelCase )
            __UpperCamelCase : Tuple = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
            __UpperCamelCase : List[Any] = len(_UpperCAmelCase )
            self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
            check_encoder_attentions_output(_UpperCAmelCase )
            if self.is_encoder_decoder:
                __UpperCamelCase : str = model_class(_UpperCAmelCase )
                __UpperCamelCase : Dict = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
                self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
                check_decoder_attentions_output(_UpperCAmelCase )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            __UpperCamelCase : Optional[Any] = True
            __UpperCamelCase : Tuple = model_class(_UpperCAmelCase )
            __UpperCamelCase : int = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
            self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
            check_encoder_attentions_output(_UpperCAmelCase )
            # Check attention is always last and order is fine
            __UpperCamelCase : int = True
            __UpperCamelCase : str = True
            __UpperCamelCase : Optional[Any] = model_class(_UpperCAmelCase )
            __UpperCamelCase : Optional[int] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase ) )
            self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase )
            check_encoder_attentions_output(_UpperCAmelCase )


# Integration test: run the released checkpoint on a fixed input and compare a
# 3x3 logits slice against hard-coded reference values.
@require_tf
class A ( unittest.TestCase ):
    '''simple docstring'''

    @slow
    def a_ (self ) -> str:
        __UpperCamelCase : Dict = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        __UpperCamelCase : str = tf.constant([[0, 1, 2, 3, 4, 5]] )
        __UpperCamelCase : Optional[int] = model(_UpperCAmelCase )[0]
        __UpperCamelCase : Tuple = [1, 6, 7_6_8]
        self.assertEqual(output.shape , _UpperCAmelCase )
        __UpperCamelCase : Any = tf.constant(
            [
                [
                    [-0.03_475_493, -0.4_686_034, -0.30_638_832],
                    [0.22_637_248, -0.26_988_646, -0.7_423_424],
                    [0.10_324_868, -0.45_013_508, -0.58_280_784],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1E-4 )
298
1
# NOTE(review): this span appears to be a machine-mangled copy of a diffusers
# Kandinsky img2img pipeline test module. Both classes are named `A`, every
# property/method is named `a_` (later defs shadow earlier ones), parameter
# lists repeat `_UpperCAmelCase`, and bodies reference names (`tokenizer`,
# `text_encoder`, `seed`, `SCREAMING_SNAKE_CASE__`) that nothing in this span
# defines. Not runnable as-is; comments describe the apparent intent, tokens
# are left untouched.
'''simple docstring'''

import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast

from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference

enable_full_determinism()


# Fast CPU test suite (base was presumably PipelineTesterMixin;
# `SCREAMING_SNAKE_CASE__` is undefined here).
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    '''simple docstring'''

    # pipeline_class / params / batch_params / callback params — all mangled to `A`.
    A = KandinskyImgaImgPipeline
    A = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    A = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    A = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    A = False

    # The following properties were presumably text_embedder_hidden_size,
    # time_input_dim, block_out_channels_a, time_embed_dim, cross_attention_dim,
    # dummy_tokenizer, dummy_text_encoder, dummy_unet, dummy_movq_kwargs,
    # dummy_movq — all collapsed to the name `a_`.
    @property
    def a_ (self ) -> List[str]:
        return 3_2

    @property
    def a_ (self ) -> Optional[Any]:
        return 3_2

    @property
    def a_ (self ) -> Optional[Any]:
        return self.time_input_dim

    @property
    def a_ (self ) -> Tuple:
        return self.time_input_dim * 4

    @property
    def a_ (self ) -> List[str]:
        return 1_0_0

    @property
    def a_ (self ) -> Tuple:
        __UpperCamelCase : Tuple = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
        return tokenizer

    # Tiny deterministic text encoder (seeded for reproducibility).
    @property
    def a_ (self ) -> int:
        torch.manual_seed(0 )
        __UpperCamelCase : int = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
        __UpperCamelCase : Dict = MultilingualCLIP(_UpperCAmelCase )
        __UpperCamelCase : List[Any] = text_encoder.eval()
        return text_encoder

    # Tiny UNet matching the Kandinsky architecture.
    @property
    def a_ (self ) -> Any:
        torch.manual_seed(0 )
        __UpperCamelCase : Optional[int] = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        __UpperCamelCase : Union[str, Any] = UNetaDConditionModel(**_UpperCAmelCase )
        return model

    @property
    def a_ (self ) -> Any:
        return {
            "block_out_channels": [3_2, 6_4],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 1_2,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def a_ (self ) -> str:
        torch.manual_seed(0 )
        __UpperCamelCase : Any = VQModel(**self.dummy_movq_kwargs )
        return model

    # get_dummy_components: assemble all tiny submodels plus a DDIM scheduler.
    def a_ (self ) -> Any:
        __UpperCamelCase : Union[str, Any] = self.dummy_text_encoder
        __UpperCamelCase : List[str] = self.dummy_tokenizer
        __UpperCamelCase : Any = self.dummy_unet
        __UpperCamelCase : Optional[Any] = self.dummy_movq
        __UpperCamelCase : Any = {
            "num_train_timesteps": 1_0_0_0,
            "beta_schedule": "linear",
            "beta_start": 0.00_085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        __UpperCamelCase : Dict = DDIMScheduler(**_UpperCAmelCase )
        __UpperCamelCase : Dict = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    # get_dummy_inputs(device, seed=0): seeded embeddings, a 64x64 init image,
    # and a device-appropriate generator (mps cannot seed a per-device Generator).
    def a_ (self , _UpperCAmelCase , _UpperCAmelCase=0 ) -> Optional[Any]:
        __UpperCamelCase : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
        __UpperCamelCase : Any = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_UpperCAmelCase )
        # create init_image
        __UpperCamelCase : str = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
        __UpperCamelCase : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __UpperCamelCase : Optional[int] = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert("RGB" ).resize((2_5_6, 2_5_6) )
        if str(_UpperCAmelCase ).startswith("mps" ):
            __UpperCamelCase : Optional[int] = torch.manual_seed(_UpperCAmelCase )
        else:
            __UpperCamelCase : Optional[int] = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
        __UpperCamelCase : str = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 6_4,
            "width": 6_4,
            "num_inference_steps": 1_0,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    # Smoke test: run the tiny pipeline on CPU and compare a 3x3 corner slice
    # against hard-coded reference pixel values (dict vs tuple return paths).
    def a_ (self ) -> Optional[Any]:
        __UpperCamelCase : Dict = "cpu"
        __UpperCamelCase : List[Any] = self.get_dummy_components()
        __UpperCamelCase : int = self.pipeline_class(**_UpperCAmelCase )
        __UpperCamelCase : int = pipe.to(_UpperCAmelCase )
        pipe.set_progress_bar_config(disable=_UpperCAmelCase )
        __UpperCamelCase : Union[str, Any] = pipe(**self.get_dummy_inputs(_UpperCAmelCase ) )
        __UpperCamelCase : Optional[Any] = output.images
        __UpperCamelCase : Any = pipe(
            **self.get_dummy_inputs(_UpperCAmelCase ) , return_dict=_UpperCAmelCase , )[0]
        __UpperCamelCase : Optional[int] = image[0, -3:, -3:, -1]
        __UpperCamelCase : Tuple = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 6_4, 6_4, 3)
        __UpperCamelCase : Tuple = np.array(
            [0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


# GPU integration test: run the full released prior + img2img pipelines and
# compare the output against a stored reference image.
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
    '''simple docstring'''

    def a_ (self ) -> Optional[int]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def a_ (self ) -> int:
        __UpperCamelCase : Optional[int] = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy" )
        __UpperCamelCase : Optional[int] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        __UpperCamelCase : Optional[Any] = "A red cartoon frog, 4k"
        __UpperCamelCase : Any = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
        pipe_prior.to(_UpperCAmelCase )
        __UpperCamelCase : List[Any] = KandinskyImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1" , torch_dtype=torch.floataa )
        __UpperCamelCase : Union[str, Any] = pipeline.to(_UpperCAmelCase )
        pipeline.set_progress_bar_config(disable=_UpperCAmelCase )
        __UpperCamelCase : Optional[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
        __UpperCamelCase , __UpperCamelCase : str = pipe_prior(
            _UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        __UpperCamelCase : List[str] = pipeline(
            _UpperCAmelCase , image=_UpperCAmelCase , image_embeds=_UpperCAmelCase , negative_image_embeds=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type="np" , )
        __UpperCamelCase : Any = output.images[0]
        assert image.shape == (7_6_8, 7_6_8, 3)
        assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
298
# NOTE(review): this span appears to be a machine-mangled copy of a timm-ResNet
# -> Hugging Face checkpoint-conversion script. Both dataclasses are named `A`
# with every field named `A` (illegal duplicate class attributes for a
# dataclass), both converter functions are named `__lowerCAmelCase` (the second
# def shadows the first), and bodies/main guard reference original names
# (`Tracker`, `ModuleTransfer`, `ImageNetPreTrainedConfig`, `parser`, `args`,
# `convert_weight_and_push`) that nothing here defines. Not runnable as-is;
# comments describe the apparent intent, tokens are left untouched.
'''simple docstring'''

import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List

import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor

from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging

logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger()


# Presumably Tracker: registers forward hooks on every leaf module of a model,
# records which modules actually run during a forward pass, then removes hooks.
@dataclass
class A :
    '''simple docstring'''

    A = 42
    A = field(default_factory=SCREAMING_SNAKE_CASE__ )
    A = field(default_factory=SCREAMING_SNAKE_CASE__ )

    # forward hook: record only leaf modules (or conv/batch-norm layers).
    # NOTE(review): referenced below as `self._forward_hook` but defined as `a_`.
    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
        __UpperCamelCase : str = len(list(m.modules() ) ) == 1 or isinstance(_UpperCAmelCase , nn.Convad ) or isinstance(_UpperCAmelCase , nn.BatchNormad )
        if has_not_submodules:
            self.traced.append(_UpperCAmelCase )

    def __call__(self , _UpperCAmelCase ) -> Optional[int]:
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(_UpperCAmelCase )
        # detach all hooks once the trace is captured
        [x.remove() for x in self.handles]
        return self

    @property
    def a_ (self ) -> Tuple:
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda _UpperCAmelCase : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )


# Presumably ModuleTransfer: traces src and dest models on the same input and
# copies state_dicts operation-by-operation from src to dest.
@dataclass
class A :
    '''simple docstring'''

    A = 42
    A = 42
    A = 0
    A = field(default_factory=SCREAMING_SNAKE_CASE__ )
    A = field(default_factory=SCREAMING_SNAKE_CASE__ )

    def __call__(self , _UpperCAmelCase ) -> Any:
        __UpperCamelCase : List[str] = Tracker(self.dest )(_UpperCAmelCase ).parametrized
        __UpperCamelCase : List[Any] = Tracker(self.src )(_UpperCAmelCase ).parametrized
        # drop operation types the caller asked to skip on either side
        __UpperCamelCase : Optional[int] = list(filter(lambda _UpperCAmelCase : type(_UpperCAmelCase ) not in self.src_skip , _UpperCAmelCase ) )
        __UpperCamelCase : List[Any] = list(filter(lambda _UpperCAmelCase : type(_UpperCAmelCase ) not in self.dest_skip , _UpperCAmelCase ) )
        if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(_UpperCAmelCase )} operations while"
                f" destination module has {len(_UpperCAmelCase )}." )
        for dest_m, src_m in zip(_UpperCAmelCase , _UpperCAmelCase ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}" )


# Presumably convert_weight_and_push(name, config, save_directory, push_to_hub=True):
# instantiate the timm model and the HF model, transfer weights, verify logits
# match on a random input, then optionally push model + image processor to the Hub.
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = True ):
    print(F"Converting {name}..." )
    with torch.no_grad():
        __UpperCamelCase : int = timm.create_model(snake_case__ , pretrained=snake_case__ ).eval()
        __UpperCamelCase : Union[str, Any] = ResNetForImageClassification(snake_case__ ).eval()
        __UpperCamelCase : Tuple = ModuleTransfer(src=snake_case__ , dest=snake_case__ )
        __UpperCamelCase : List[Any] = torch.randn((1, 3, 224, 224) )
        module_transfer(snake_case__ )
    # sanity check: the converted model must reproduce the timm logits exactly
    assert torch.allclose(from_model(snake_case__ ) , our_model(snake_case__ ).logits ), "The model logits don't match the original one."
    __UpperCamelCase : Any = F"resnet{'-'.join(name.split('resnet' ) )}"
    print(snake_case__ )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message="Add model" , use_temp_dir=snake_case__ , )
        # we can use the convnext one
        __UpperCamelCase : Union[str, Any] = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message="Add image processor" , use_temp_dir=snake_case__ , )
        print(F"Pushed {checkpoint_name}" )


# Presumably convert_weights_and_push(save_directory, model_name=None, push_to_hub=True):
# build ImageNet label maps, define per-architecture configs, and convert either
# one named model or all of them.
def __lowerCAmelCase ( snake_case__ , snake_case__ = None , snake_case__ = True ):
    __UpperCamelCase : str = "imagenet-1k-id2label.json"
    __UpperCamelCase : Any = 1_000
    __UpperCamelCase : List[str] = (1, num_labels)
    __UpperCamelCase : List[str] = "huggingface/label-files"
    __UpperCamelCase : str = num_labels
    __UpperCamelCase : str = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="dataset" ) , "r" ) )
    __UpperCamelCase : List[str] = {int(snake_case__ ): v for k, v in idalabel.items()}
    __UpperCamelCase : Any = idalabel
    __UpperCamelCase : Optional[int] = {v: k for k, v in idalabel.items()}
    # partial presumably binds ResNetConfig with the label maps pre-filled
    __UpperCamelCase : Tuple = partial(snake_case__ , num_labels=snake_case__ , idalabel=snake_case__ , labelaid=snake_case__ )
    __UpperCamelCase : Dict = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
    }
    if model_name:
        convert_weight_and_push(snake_case__ , names_to_config[model_name] , snake_case__ , snake_case__ )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
    return config, expected_shape


if __name__ == "__main__":
    # NOTE(review): assigns to `_lowerCAmelCase` but then uses `parser`/`args`/
    # `pytorch_dump_folder_path`; this guard raises NameError as written.
    _lowerCAmelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default=None,
        type=str,
        help=(
            '''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
            ''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default=None,
        type=Path,
        required=True,
        help='''Path to the output PyTorch model directory.''',
    )
    parser.add_argument(
        '''--push_to_hub''',
        default=True,
        type=bool,
        required=False,
        help='''If True, push model and image processor to the hub.''',
    )
    _lowerCAmelCase = parser.parse_args()
    _lowerCAmelCase = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
298
1
'''simple docstring''' import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class A ( unittest.TestCase ): '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = 3_2 , _UpperCAmelCase = True , _UpperCAmelCase = 1 / 2_5_5 , _UpperCAmelCase = True , _UpperCAmelCase = True , _UpperCAmelCase = [0.48_145_466, 0.4_578_275, 0.40_821_073] , _UpperCAmelCase = [0.26_862_954, 0.26_130_258, 0.27_577_711] , _UpperCAmelCase = True , _UpperCAmelCase=7 , _UpperCAmelCase=3_0 , _UpperCAmelCase=4_0_0 , _UpperCAmelCase=3 , ) -> Dict: __UpperCamelCase : Dict = parent __UpperCamelCase : Any = do_resize __UpperCamelCase : Union[str, Any] = size if size is not None else {"shortest_edge": 2_8_8} __UpperCamelCase : Any = size_divisor __UpperCamelCase : Optional[int] = do_rescale __UpperCamelCase : Union[str, Any] = rescale_factor __UpperCamelCase : int = do_normalize __UpperCamelCase : List[Any] = do_center_crop __UpperCamelCase : Optional[int] = image_mean __UpperCamelCase : Tuple = image_std __UpperCamelCase : Tuple = do_pad __UpperCamelCase : Tuple = batch_size __UpperCamelCase : Dict = num_channels __UpperCamelCase : Dict = min_resolution __UpperCamelCase : Optional[Any] = max_resolution def a_ (self ) -> Optional[int]: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def a_ (self , _UpperCAmelCase , _UpperCAmelCase=False ) -> Optional[Any]: if not batched: __UpperCamelCase : 
List[str] = self.size["shortest_edge"] __UpperCamelCase : Optional[int] = image_inputs[0] if isinstance(_UpperCAmelCase , Image.Image ): __UpperCamelCase , __UpperCamelCase : Optional[Any] = image.size else: __UpperCamelCase , __UpperCamelCase : Union[str, Any] = image.shape[1], image.shape[2] __UpperCamelCase : Dict = size / min(_UpperCAmelCase , _UpperCAmelCase ) if h < w: __UpperCamelCase , __UpperCamelCase : Tuple = size, scale * w else: __UpperCamelCase , __UpperCamelCase : List[Any] = scale * h, size __UpperCamelCase : List[Any] = int((1_3_3_3 / 8_0_0) * size ) if max(_UpperCAmelCase , _UpperCAmelCase ) > max_size: __UpperCamelCase : str = max_size / max(_UpperCAmelCase , _UpperCAmelCase ) __UpperCamelCase : Dict = newh * scale __UpperCamelCase : Union[str, Any] = neww * scale __UpperCamelCase , __UpperCamelCase : Optional[int] = int(newh + 0.5 ), int(neww + 0.5 ) __UpperCamelCase , __UpperCamelCase : Optional[int] = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: __UpperCamelCase : int = [] for image in image_inputs: __UpperCamelCase , __UpperCamelCase : Optional[Any] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __UpperCamelCase : Tuple = max(_UpperCAmelCase , key=lambda _UpperCAmelCase : item[0] )[0] __UpperCamelCase : Union[str, Any] = max(_UpperCAmelCase , key=lambda _UpperCAmelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ): '''simple docstring''' A = BridgeTowerImageProcessor if is_vision_available() else None def a_ (self ) -> Dict: __UpperCamelCase : Optional[Any] = BridgeTowerImageProcessingTester(self ) @property def a_ (self ) -> Optional[int]: return self.image_processor_tester.prepare_image_processor_dict() def a_ (self ) -> Union[str, Any]: __UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) 
self.assertTrue(hasattr(_UpperCAmelCase , "image_mean" ) ) self.assertTrue(hasattr(_UpperCAmelCase , "image_std" ) ) self.assertTrue(hasattr(_UpperCAmelCase , "do_normalize" ) ) self.assertTrue(hasattr(_UpperCAmelCase , "do_resize" ) ) self.assertTrue(hasattr(_UpperCAmelCase , "size" ) ) self.assertTrue(hasattr(_UpperCAmelCase , "size_divisor" ) ) def a_ (self ) -> List[str]: pass def a_ (self ) -> List[Any]: # Initialize image processor __UpperCamelCase : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __UpperCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , Image.Image ) # Test not batched input __UpperCamelCase : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values __UpperCamelCase , __UpperCamelCase : List[str] = self.image_processor_tester.get_expected_values(_UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __UpperCamelCase : Optional[int] = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values __UpperCamelCase , __UpperCamelCase : List[str] = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def a_ (self ) -> Tuple: # Initialize image processor __UpperCamelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __UpperCamelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , np.ndarray ) # Test not batched input __UpperCamelCase : Optional[int] = 
image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values __UpperCamelCase , __UpperCamelCase : Optional[Any] = self.image_processor_tester.get_expected_values(_UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __UpperCamelCase : List[Any] = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values __UpperCamelCase , __UpperCamelCase : int = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def a_ (self ) -> int: # Initialize image processor __UpperCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __UpperCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , torch.Tensor ) # Test not batched input __UpperCamelCase : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values __UpperCamelCase , __UpperCamelCase : int = self.image_processor_tester.get_expected_values(_UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __UpperCamelCase : Optional[Any] = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values __UpperCamelCase , __UpperCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
298
'''simple docstring''' import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) _lowerCAmelCase = logging.getLogger() def __lowerCAmelCase ( ): __UpperCamelCase : List[str] = argparse.ArgumentParser() parser.add_argument("-f" ) __UpperCamelCase : Any = parser.parse_args() return args.f def __lowerCAmelCase ( snake_case__ ): __UpperCamelCase : Dict = {} __UpperCamelCase : Dict = os.path.join(snake_case__ , "all_results.json" ) if os.path.exists(snake_case__ ): with open(snake_case__ , "r" ) as f: __UpperCamelCase : Any = json.load(snake_case__ ) else: raise ValueError(F"can't find {path}" ) return results def __lowerCAmelCase ( ): __UpperCamelCase : Any = torch.cuda.is_available() and torch_device == "cuda" return is_using_cuda and is_apex_available() _lowerCAmelCase = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' @classmethod def a_ (cls ) -> Union[str, Any]: # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU __UpperCamelCase : Optional[Any] = tempfile.mkdtemp() __UpperCamelCase : List[str] = os.path.join(cls.tmpdir , "default_config.yml" ) write_basic_config(save_location=cls.configPath ) __UpperCamelCase : Optional[Any] = ["accelerate", "launch", "--config_file", cls.configPath] @classmethod def a_ (cls ) -> Union[str, Any]: shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Optional[int]: __UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir() __UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path 
distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n ".split() if is_cuda_and_apex_available(): testargs.append("--fp16" ) run_command(self._launch_args + testargs ) __UpperCamelCase : Tuple = get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "glue_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Dict: __UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir() __UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n ".split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. 
return run_command(self._launch_args + testargs ) __UpperCamelCase : int = get_results(_UpperCAmelCase ) self.assertLess(result["perplexity"] , 1_0_0 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "clm_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Any: __UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir() __UpperCamelCase : Optional[Any] = f"\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n ".split() run_command(self._launch_args + testargs ) __UpperCamelCase : Optional[Any] = get_results(_UpperCAmelCase ) self.assertLess(result["perplexity"] , 4_2 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "mlm_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> int: # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu __UpperCamelCase : int = 7 if get_gpu_count() > 1 else 2 __UpperCamelCase : int = self.get_auto_remove_tmp_dir() __UpperCamelCase : str = f"\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n ".split() run_command(self._launch_args + testargs ) __UpperCamelCase : List[Any] = 
get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) self.assertLess(result["train_loss"] , 0.5 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "ner_no_trainer" ) ) ) @unittest.skip(reason="Fix me @muellerzr" ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Any: __UpperCamelCase : Tuple = self.get_auto_remove_tmp_dir() __UpperCamelCase : str = f"\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split() run_command(self._launch_args + testargs ) __UpperCamelCase : Optional[int] = get_results(_UpperCAmelCase ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. 
self.assertGreaterEqual(result["eval_f1"] , 2_8 ) self.assertGreaterEqual(result["eval_exact"] , 2_8 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "qa_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Dict: __UpperCamelCase : Tuple = self.get_auto_remove_tmp_dir() __UpperCamelCase : List[str] = f"\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n ".split() run_command(self._launch_args + testargs ) __UpperCamelCase : Tuple = get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.8 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "swag_no_trainer" ) ) ) @slow @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Union[str, Any]: __UpperCamelCase : str = self.get_auto_remove_tmp_dir() __UpperCamelCase : Dict = f"\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split() run_command(self._launch_args + testargs ) __UpperCamelCase : Dict = get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_rouge1"] , 1_0 ) self.assertGreaterEqual(result["eval_rouge2"] , 2 ) self.assertGreaterEqual(result["eval_rougeL"] 
, 7 ) self.assertGreaterEqual(result["eval_rougeLsum"] , 7 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "summarization_no_trainer" ) ) ) @slow @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Tuple: __UpperCamelCase : Optional[int] = self.get_auto_remove_tmp_dir() __UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n ".split() run_command(self._launch_args + testargs ) __UpperCamelCase : List[Any] = get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_bleu"] , 3_0 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "translation_no_trainer" ) ) ) @slow def a_ (self ) -> List[Any]: __UpperCamelCase : Tuple = logging.StreamHandler(sys.stdout ) logger.addHandler(_UpperCAmelCase ) __UpperCamelCase : Dict = self.get_auto_remove_tmp_dir() __UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n ".split() run_command(self._launch_args + testargs ) __UpperCamelCase : Optional[int] = 
get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.10 ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def a_ (self ) -> Tuple: __UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir() __UpperCamelCase : Optional[Any] = f"\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n ".split() if is_cuda_and_apex_available(): testargs.append("--fp16" ) run_command(self._launch_args + testargs ) __UpperCamelCase : str = get_results(_UpperCAmelCase ) # The base model scores a 25% self.assertGreaterEqual(result["eval_accuracy"] , 0.6 ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "step_1" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "image_classification_no_trainer" ) ) )
298
1
'''simple docstring''' import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin _lowerCAmelCase = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ): '''simple docstring''' A = XLNetTokenizer A = XLNetTokenizerFast A = True A = True def a_ (self ) -> int: super().setUp() # We have a SentencePiece fixture for testing __UpperCamelCase : int = XLNetTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase ) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname ) def a_ (self ) -> List[str]: __UpperCamelCase : Optional[Any] = "<s>" __UpperCamelCase : Tuple = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase ) def a_ (self ) -> Optional[Any]: __UpperCamelCase : Tuple = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<unk>" ) self.assertEqual(vocab_keys[1] , "<s>" ) self.assertEqual(vocab_keys[-1] , "<eod>" ) self.assertEqual(len(_UpperCAmelCase ) , 1_0_0_6 ) def a_ (self ) -> List[Any]: self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 ) def a_ (self ) -> List[str]: __UpperCamelCase : Tuple = XLNetTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase ) __UpperCamelCase : Tuple = tokenizer.tokenize("This is a test" ) self.assertListEqual(_UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] ) __UpperCamelCase : Any = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( _UpperCAmelCase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) __UpperCamelCase : Any = tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] ) __UpperCamelCase : int = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertListEqual( _UpperCAmelCase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) def a_ (self ) -> Tuple: __UpperCamelCase : List[Any] = XLNetTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase ) __UpperCamelCase : List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( _UpperCAmelCase , [ SPIECE_UNDERLINE + "", "i", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "se", ".", ] , ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["▁he", "ll", "o"] ) def a_ (self ) -> Any: __UpperCamelCase : Optional[int] = XLNetTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( _UpperCAmelCase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "se", ".", ] , ) @slow def a_ (self ) -> Union[str, Any]: __UpperCamelCase : Any = XLNetTokenizer.from_pretrained("xlnet-base-cased" ) __UpperCamelCase : Optional[Any] = tokenizer.encode("sequence builders" , add_special_tokens=_UpperCAmelCase ) __UpperCamelCase : Union[str, Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=_UpperCAmelCase ) __UpperCamelCase : Dict = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase ) __UpperCamelCase : int = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase ) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def a_ (self ) -> Any: # fmt: off __UpperCamelCase : List[str] = {"input_ids": [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_UpperCAmelCase , model_name="xlnet-base-cased" , revision="c841166438c31ec7ca9a106dee7bb312b73ae511" , )
298
"""Twin-prime lookup built on the project's primality test."""
from maths.prime_check import is_prime


def __lowerCAmelCase(number: int) -> int:
    """Return the twin prime of ``number``.

    Twin primes are prime pairs that differ by exactly two
    (e.g. 5 and 7). If both ``number`` and ``number + 2`` are prime,
    the partner ``number + 2`` is returned; otherwise ``-1`` is
    returned as a "no twin" sentinel.

    Args:
        number: candidate lower member of a twin-prime pair.

    Returns:
        ``number + 2`` when ``(number, number + 2)`` is a twin-prime
        pair, else ``-1``.

    Raises:
        TypeError: if ``number`` is not an ``int``.
    """
    if not isinstance(number, int):
        # Original code built this message and then raised the raw input
        # by mistake; raise the message itself.
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
298
1
"""Levenshtein edit distance via memoized recursion."""
import functools


def __lowerCAmelCase(worda, wordb):
    """Return the minimum number of single-character insertions, deletions
    and substitutions needed to turn ``worda`` into ``wordb``.

    The mangled original declared both parameters (and both recursion
    indices) under one name -- a duplicate-argument SyntaxError -- so every
    comparison looked at the same word; distinct names are restored here.
    """
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # first word exhausted -> insert the remainder of the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # second word exhausted -> delete the remainder of the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        # substitution cost: 1 when current letters differ, else 0
        diff = int(worda[indexa] != wordb[indexb])
        return min(
            1 + min_distance(indexa + 1, indexb),  # delete from worda
            1 + min_distance(indexa, indexb + 1),  # insert into worda
            diff + min_distance(indexa + 1, indexb + 1),  # substitute / match
        )

    return min_distance(0, 0)


# Public alias: the dunder name is skipped by ``from module import *``.
edit_distance = __lowerCAmelCase


if __name__ == "__main__":
    import doctest

    doctest.testmod()
298
"""Hubble parameter H(z) from the Friedmann equation for an LCDM universe."""


def __lowerCAmelCase(
    hubble_constant,
    radiation_density,
    matter_density,
    dark_energy,
    redshift,
):
    """Return the Hubble parameter at ``redshift``.

    H(z) = H0 * sqrt(Or*(1+z)^4 + Om*(1+z)^3 + Ok*(1+z)^2 + OL),
    where the curvature density Ok closes the density sum to one.

    Args:
        hubble_constant: H0, in whatever units the caller uses (km/s/Mpc).
        radiation_density: relative radiation density Or (0..1).
        matter_density: relative matter density Om (0..1).
        dark_energy: relative dark-energy density OL (0..1).
        redshift: redshift z >= 0.

    Raises:
        ValueError: if any input is negative, or a relative density
            exceeds one.
    """
    # NOTE: the mangled original declared all five parameters under one
    # name (a SyntaxError) and read undefined names; restored here.
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    # curvature density closes the sum of relative densities to 1
    curvature = 1 - (matter_density + radiation_density + dark_energy)
    e_a = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    return hubble_constant * e_a ** (1 / 2)


# The demo below (and external callers) use this name; the mangled record
# called the undefined ``hubble_parameter``.
hubble_parameter = __lowerCAmelCase


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    _lowerCAmelCase = 0.3
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=_lowerCAmelCase,
            dark_energy=1 - _lowerCAmelCase,
            redshift=0,
        )
    )
298
1
'''simple docstring'''
# Lazy-import bootstrap for the Falcon model family: torch-backed modelling
# classes are imported only on first attribute access at runtime (or eagerly
# for static type checkers via TYPE_CHECKING).
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Import table: submodule name -> public names it provides.
# NOTE(review): identifier mangling collapsed `_import_structure` AND the
# torch-only name list below onto the single name `_lowerCAmelCase`, so the
# later assignment overwrites this dict and the final `_LazyModule(...)` call
# references `_import_structure`, which is never defined in this record.
# Restore the distinct upstream names before running.
_lowerCAmelCase = {
    '''configuration_falcon''': ['''FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FalconConfig'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch absent: only the configuration objects are exposed
    pass
else:
    # torch present: also expose the modelling classes
    _lowerCAmelCase = [
        '''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''FalconForCausalLM''',
        '''FalconModel''',
        '''FalconPreTrainedModel''',
        '''FalconForSequenceClassification''',
        '''FalconForTokenClassification''',
        '''FalconForQuestionAnswering''',
    ]

if TYPE_CHECKING:
    # Static type checkers resolve the real imports eagerly.
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    # At runtime, replace this module object with a lazy proxy.
    import sys

    _lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
298
"""Keep the auto-generated model lists inside the task-guide docs in sync.

All paths are set with the intent you should run this script from the root
of the repo with the command: ``python utils/check_task_guides.py``.

The mangled record defined three different functions under the one name
``__lowerCAmelCase`` (each shadowing the last), repeated parameter names
(a SyntaxError) and assigned every module constant to ``_lowerCAmelCase``
while the code read the real names; the distinct names are restored here.
"""
import argparse
import os

from transformers.utils import direct_transformers_import

TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"


def _find_text_in_file(filename, start_prompt, end_prompt):
    """Extract the text between ``start_prompt`` and ``end_prompt``.

    Returns:
        ``(text, start_index, end_index, lines)`` so callers can splice a
        replacement back into ``lines``.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    # Find the end prompt.
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    # Strip blank lines on both sides of the extracted span.
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Task-guide file -> auto-mapping listing the models that support that task.
TASK_GUIDE_TO_MODELS = {
    "asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    "audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    "language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    "image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    "masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    "multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    "object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    "question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    "semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    "sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    "summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    "translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    "document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    "monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}

# This list contains model types used in some task guides that are not in
# `CONFIG_MAPPING_NAMES` (therefore not in any `MODEL_MAPPING_NAMES` or any
# `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
    "summarization.md": ("nllb",),
    "translation.md": ("nllb",),
}


def get_model_list_for_task(task_guide):
    """Render the markdown list of models that support ``task_guide``."""
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    """Check (and optionally rewrite) the generated model list of one guide.

    Raises:
        ValueError: when the list is stale and ``overwrite`` is False.
    """
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
298
1
# ===========================================================================
# NOTE(review): obfuscated dump of diffusers' DeepFloyd-IF pipeline test
# file: a fast PipelineTesterMixin suite plus slow @require_torch_gpu
# integration tests covering text-to-image, img2img and inpainting, each
# followed by a super-resolution stage-II pass with peak-VRAM and
# mean-pixel-difference assertions against reference .npy images.
# The identifier mangling here is destructive, not merely cosmetic:
#   * every test method is named `a_`, so later defs shadow earlier ones;
#   * several signatures repeat the parameter `_UpperCAmelCase`, which is a
#     SyntaxError ("duplicate argument in function definition");
#   * assigned names (`__UpperCamelCase`) never match the names later read
#     (`generator`, `inputs`, `pipe_1`, `pipe_2`, `image`, `mem_bytes`, ...)
#     and the first class inherits from the undefined SCREAMING_SNAKE_CASE__.
# This record cannot run as-is; restore from the upstream file rather than
# hand-patching. Code below is byte-identical; only this comment was added.
# ===========================================================================
'''simple docstring''' import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): '''simple docstring''' A = IFPipeline A = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"} A = TEXT_TO_IMAGE_BATCH_PARAMS A = PipelineTesterMixin.required_optional_params - {"latents"} def a_ (self ) -> List[Any]: return self._get_dummy_components() def a_ (self , _UpperCAmelCase , _UpperCAmelCase=0 ) -> Optional[Any]: if str(_UpperCAmelCase ).startswith("mps" ): __UpperCamelCase : str = torch.manual_seed(_UpperCAmelCase ) else: __UpperCamelCase : str = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase ) __UpperCamelCase : Tuple = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def a_ (self ) -> Tuple: self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" ) def a_ (self ) -> Optional[int]: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def a_ (self ) -> List[Any]: self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def a_ (self ) -> Union[str, Any]: self._test_save_load_local() def a_ (self 
) -> Any: self._test_inference_batch_single_identical( expected_max_diff=1E-2 , ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def a_ (self ) -> Union[str, Any]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @slow @require_torch_gpu class A ( unittest.TestCase ): '''simple docstring''' def a_ (self ) -> int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a_ (self ) -> Dict: # if __UpperCamelCase : Optional[Any] = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa ) __UpperCamelCase : Union[str, Any] = IFSuperResolutionPipeline.from_pretrained( "DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to("cuda" ) __UpperCamelCase , __UpperCamelCase : Optional[int] = pipe_a.encode_prompt("anime turtle" , device="cuda" ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() __UpperCamelCase : Any = None __UpperCamelCase : List[Any] = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img __UpperCamelCase : List[Any] = IFImgaImgPipeline(**pipe_a.components ) __UpperCamelCase : Tuple = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) 
pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting __UpperCamelCase : str = IFInpaintingPipeline(**pipe_a.components ) __UpperCamelCase : List[Any] = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Dict: # pipeline 1 _start_torch_memory_measurement() __UpperCamelCase : int = torch.Generator(device="cpu" ).manual_seed(0 ) __UpperCamelCase : Dict = pipe_a( prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , num_inference_steps=2 , generator=_UpperCAmelCase , output_type="np" , ) __UpperCamelCase : Optional[int] = output.images[0] assert image.shape == (6_4, 6_4, 3) __UpperCamelCase : Optional[int] = torch.cuda.max_memory_allocated() assert mem_bytes < 1_3 * 1_0**9 __UpperCamelCase : Optional[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" ) assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase ) # pipeline 2 _start_torch_memory_measurement() __UpperCamelCase : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 ) __UpperCamelCase : Optional[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(_UpperCAmelCase ) __UpperCamelCase : int = pipe_a( prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type="np" , ) __UpperCamelCase : Optional[int] = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) __UpperCamelCase : Tuple = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 __UpperCamelCase : Any = load_numpy( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" ) assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]: # pipeline 1 _start_torch_memory_measurement() __UpperCamelCase : List[str] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(_UpperCAmelCase ) __UpperCamelCase : Union[str, Any] = torch.Generator(device="cpu" ).manual_seed(0 ) __UpperCamelCase : List[Any] = pipe_a( prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , num_inference_steps=2 , generator=_UpperCAmelCase , output_type="np" , ) __UpperCamelCase : str = output.images[0] assert image.shape == (6_4, 6_4, 3) __UpperCamelCase : Any = torch.cuda.max_memory_allocated() assert mem_bytes < 1_0 * 1_0**9 __UpperCamelCase : str = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" ) assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase ) # pipeline 2 _start_torch_memory_measurement() __UpperCamelCase : Tuple = torch.Generator(device="cpu" ).manual_seed(0 ) __UpperCamelCase : Tuple = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(_UpperCAmelCase ) __UpperCamelCase : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = pipe_a( prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , original_image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type="np" , ) __UpperCamelCase : Union[str, Any] = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) __UpperCamelCase : Dict = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 __UpperCamelCase : Any = load_numpy( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" ) assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> str: # pipeline 1 _start_torch_memory_measurement() __UpperCamelCase : Tuple = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(_UpperCAmelCase ) __UpperCamelCase : str = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1 ) ).to(_UpperCAmelCase ) __UpperCamelCase : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 ) __UpperCamelCase : List[Any] = pipe_a( prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , num_inference_steps=2 , generator=_UpperCAmelCase , output_type="np" , ) __UpperCamelCase : Any = output.images[0] assert image.shape == (6_4, 6_4, 3) __UpperCamelCase : str = torch.cuda.max_memory_allocated() assert mem_bytes < 1_0 * 1_0**9 __UpperCamelCase : Union[str, Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" ) assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase ) # pipeline 2 _start_torch_memory_measurement() __UpperCamelCase : Optional[Any] = torch.Generator(device="cpu" ).manual_seed(0 ) __UpperCamelCase : List[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(_UpperCAmelCase ) __UpperCamelCase : str = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(_UpperCAmelCase ) __UpperCamelCase : Union[str, Any] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1 ) ).to(_UpperCAmelCase ) __UpperCamelCase : Dict = pipe_a( prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , original_image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type="np" , ) 
__UpperCamelCase : Dict = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) __UpperCamelCase : str = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 __UpperCamelCase : List[str] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" ) assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase ) def __lowerCAmelCase ( ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
298
# ===========================================================================
# NOTE(review): obfuscated dump of transformers' OwlViT processor, which
# composes a CLIP tokenizer with the OwlViT image processor: text queries
# are padded per batch item to the maximum query count and the per-item
# encodings are concatenated with the backend matching `return_tensors`
# ("np", "jax", "pt" or "tf"), alongside optional `query_images` / `images`
# pixel values.
# The identifier mangling is destructive:
#   * `__init__` and `__call__` repeat the parameter `_UpperCAmelCase`
#     several times -- a SyntaxError ("duplicate argument");
#   * assigned names (`__UpperCamelCase`) never match the names later read
#     (`image_processor`, `encodings`, `input_ids`, `attention_mask`, ...);
#   * the base class reference SCREAMING_SNAKE_CASE__ is undefined.
# Restore from the upstream file rather than hand-patching. Code below is
# byte-identical to the record; only this comment block was added.
# ===========================================================================
'''simple docstring''' import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' A = ["image_processor", "tokenizer"] A = "OwlViTImageProcessor" A = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__(self , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase ) -> str: __UpperCamelCase : Tuple = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , _UpperCAmelCase , ) __UpperCamelCase : str = kwargs.pop("feature_extractor" ) __UpperCamelCase : Tuple = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(_UpperCAmelCase , _UpperCAmelCase ) def __call__(self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="max_length" , _UpperCAmelCase="np" , **_UpperCAmelCase ) -> str: if text is None and query_images is None and images is None: raise ValueError( "You have to specify at least one text or query image or image. All three cannot be none." 
) if text is not None: if isinstance(_UpperCAmelCase , _UpperCAmelCase ) or (isinstance(_UpperCAmelCase , _UpperCAmelCase ) and not isinstance(text[0] , _UpperCAmelCase )): __UpperCamelCase : Tuple = [self.tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )] elif isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(text[0] , _UpperCAmelCase ): __UpperCamelCase : List[str] = [] # Maximum number of queries across batch __UpperCamelCase : List[str] = max([len(_UpperCAmelCase ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(_UpperCAmelCase ) != max_num_queries: __UpperCamelCase : Any = t + [" "] * (max_num_queries - len(_UpperCAmelCase )) __UpperCamelCase : int = self.tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ) encodings.append(_UpperCAmelCase ) else: raise TypeError("Input text should be a string, a list of strings or a nested list of strings" ) if return_tensors == "np": __UpperCamelCase : List[str] = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) __UpperCamelCase : int = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp __UpperCamelCase : Tuple = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) __UpperCamelCase : Optional[Any] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch __UpperCamelCase : Any = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 ) __UpperCamelCase : List[Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf __UpperCamelCase : Any = tf.stack([encoding["input_ids"] for encoding in encodings] , 
axis=0 ) __UpperCamelCase : Optional[Any] = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 ) else: raise ValueError("Target return tensor type could not be returned" ) __UpperCamelCase : Optional[Any] = BatchEncoding() __UpperCamelCase : Union[str, Any] = input_ids __UpperCamelCase : List[str] = attention_mask if query_images is not None: __UpperCamelCase : str = BatchEncoding() __UpperCamelCase : Any = self.image_processor( _UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ).pixel_values __UpperCamelCase : List[Any] = query_pixel_values if images is not None: __UpperCamelCase : Dict = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ) if text is not None and images is not None: __UpperCamelCase : Optional[Any] = image_features.pixel_values return encoding elif query_images is not None and images is not None: __UpperCamelCase : Union[str, Any] = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**_UpperCAmelCase ) , tensor_type=_UpperCAmelCase ) def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]: return self.image_processor.post_process(*_UpperCAmelCase , **_UpperCAmelCase ) def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> List[str]: return self.image_processor.post_process_object_detection(*_UpperCAmelCase , **_UpperCAmelCase ) def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]: return self.image_processor.post_process_image_guided_detection(*_UpperCAmelCase , **_UpperCAmelCase ) def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Union[str, Any]: return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase ) def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> int: return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase ) @property def a_ (self ) -> Tuple: warnings.warn( "`feature_extractor_class` is 
deprecated and will be removed in v5. Use `image_processor_class` instead." , _UpperCAmelCase , ) return self.image_processor_class @property def a_ (self ) -> Union[str, Any]: warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _UpperCAmelCase , ) return self.image_processor
298
1
# ===========================================================================
# NOTE(review): obfuscated dump of transformers' Mask2Former configuration
# class (model_type "mask2former"): stores the Swin backbone config (built
# from CONFIG_MAPPING when a dict or None is passed), transformer decoder
# sizes, loss weights and point-sampling hyper-parameters, plus
# `from_backbone_config` and `to_dict` helpers.
# The identifier mangling is destructive:
#   * `__init__` declares ~30 parameters all named `_UpperCAmelCase` --
#     a SyntaxError ("duplicate argument in function definition");
#   * assigned names (`__UpperCamelCase`) never match the names later read
#     (`backbone_config`, `feature_size`, `num_queries`, ...), and the base
#     class reference SCREAMING_SNAKE_CASE__ is undefined;
#   * the final `decoder_layers` assignment clobbers the earlier one even in
#     the upstream ordering -- verify against upstream when restoring.
# Restore from the upstream file rather than hand-patching. Code below is
# byte-identical to the record; only this comment block was added.
# ===========================================================================
'''simple docstring''' import copy from typing import Dict, List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING _lowerCAmelCase = { '''facebook/mask2former-swin-small-coco-instance''': ( '''https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json''' ) # See all Mask2Former models at https://huggingface.co/models?filter=mask2former } _lowerCAmelCase = logging.get_logger(__name__) class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' A = "mask2former" A = ["swin"] A = {"hidden_size": "hidden_dim"} def __init__(self , _UpperCAmelCase = None , _UpperCAmelCase = 2_5_6 , _UpperCAmelCase = 2_5_6 , _UpperCAmelCase = 2_5_6 , _UpperCAmelCase = 1_0_2_4 , _UpperCAmelCase = "relu" , _UpperCAmelCase = 6 , _UpperCAmelCase = 1_0 , _UpperCAmelCase = 8 , _UpperCAmelCase = 0.0 , _UpperCAmelCase = 2_0_4_8 , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = 4 , _UpperCAmelCase = 2_5_5 , _UpperCAmelCase = 1_0_0 , _UpperCAmelCase = 0.1 , _UpperCAmelCase = 2.0 , _UpperCAmelCase = 5.0 , _UpperCAmelCase = 5.0 , _UpperCAmelCase = 1_2_5_4_4 , _UpperCAmelCase = 3.0 , _UpperCAmelCase = 0.75 , _UpperCAmelCase = 0.02 , _UpperCAmelCase = 1.0 , _UpperCAmelCase = True , _UpperCAmelCase = [4, 8, 1_6, 3_2] , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> Any: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone." 
) __UpperCamelCase : Optional[int] = CONFIG_MAPPING["swin"]( image_size=2_2_4 , in_channels=3 , patch_size=4 , embed_dim=9_6 , depths=[2, 2, 1_8, 2] , num_heads=[3, 6, 1_2, 2_4] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=_UpperCAmelCase , out_features=["stage1", "stage2", "stage3", "stage4"] , ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ): __UpperCamelCase : Optional[int] = backbone_config.pop("model_type" ) __UpperCamelCase : List[Any] = CONFIG_MAPPING[backbone_model_type] __UpperCamelCase : Optional[int] = config_class.from_dict(_UpperCAmelCase ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. " f"Supported model types: {','.join(self.backbones_supported )}" ) __UpperCamelCase : Optional[Any] = backbone_config __UpperCamelCase : Union[str, Any] = feature_size __UpperCamelCase : List[str] = mask_feature_size __UpperCamelCase : List[str] = hidden_dim __UpperCamelCase : Dict = encoder_feedforward_dim __UpperCamelCase : str = activation_function __UpperCamelCase : Any = encoder_layers __UpperCamelCase : Dict = decoder_layers __UpperCamelCase : Dict = num_attention_heads __UpperCamelCase : Optional[Any] = dropout __UpperCamelCase : Any = dim_feedforward __UpperCamelCase : int = pre_norm __UpperCamelCase : int = enforce_input_projection __UpperCamelCase : List[str] = common_stride __UpperCamelCase : List[str] = ignore_value __UpperCamelCase : int = num_queries __UpperCamelCase : List[str] = no_object_weight __UpperCamelCase : Optional[int] = class_weight __UpperCamelCase : str = mask_weight __UpperCamelCase : Any = dice_weight __UpperCamelCase : str = train_num_points __UpperCamelCase : Optional[Any] = oversample_ratio __UpperCamelCase : List[Any] = importance_sample_ratio __UpperCamelCase : Optional[int] = init_std __UpperCamelCase : Optional[int] = 
init_xavier_std __UpperCamelCase : Dict = use_auxiliary_loss __UpperCamelCase : Union[str, Any] = feature_strides __UpperCamelCase : Any = output_auxiliary_logits __UpperCamelCase : Union[str, Any] = decoder_layers super().__init__(**_UpperCAmelCase ) @classmethod def a_ (cls , _UpperCAmelCase , **_UpperCAmelCase ) -> str: return cls( backbone_config=_UpperCAmelCase , **_UpperCAmelCase , ) def a_ (self ) -> Dict[str, any]: __UpperCamelCase : Union[str, Any] = copy.deepcopy(self.__dict__ ) __UpperCamelCase : str = self.backbone_config.to_dict() __UpperCamelCase : Any = self.__class__.model_type return output
298
"""Base16 (hex) encoding and decoding, per RFC 3548 section 6."""


def base16_encode(data: bytes) -> str:
    """Encode ``data`` to an uppercase base16 string.

    >>> base16_encode(b"Hello World!")
    '48656C6C6F20576F726C6421'
    >>> base16_encode(b"")
    ''
    """
    # was: hex(<whole input>) inside the per-byte loop -- each *byte* must
    # be hexed individually, zero-padded to two digits.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 string back to bytes.

    >>> base16_decode('48656C6C6F20576F726C6421')
    b'Hello World!'

    Raises:
        ValueError: on odd length or characters outside ``0-9A-F``.
    """
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
298
1
# ===========================================================================
# NOTE(review): obfuscated dump of transformers' EfficientFormer
# configuration class (model_type "efficientformer"): a plain hyper-parameter
# container (stage depths, hidden sizes, attention geometry, downsampling and
# layer-scale settings) passed through to PretrainedConfig.
# The identifier mangling is destructive:
#   * `__init__` declares ~30 parameters all named `_UpperCAmelCase` --
#     a SyntaxError ("duplicate argument in function definition") -- and the
#     mutable list defaults would be shared across instances even if renamed;
#   * assigned names (`__UpperCamelCase`) never match the names read on the
#     right-hand sides (`hidden_act`, `depths`, `dim`, ...), and the base
#     class reference SCREAMING_SNAKE_CASE__ is undefined.
# Restore from the upstream file rather than hand-patching. Code below is
# byte-identical to the record; only this comment block was added.
# ===========================================================================
'''simple docstring''' from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = { '''snap-research/efficientformer-l1-300''': ( '''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json''' ), } class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' A = "efficientformer" def __init__(self , _UpperCAmelCase = [3, 2, 6, 4] , _UpperCAmelCase = [4_8, 9_6, 2_2_4, 4_4_8] , _UpperCAmelCase = [True, True, True, True] , _UpperCAmelCase = 4_4_8 , _UpperCAmelCase = 3_2 , _UpperCAmelCase = 4 , _UpperCAmelCase = 7 , _UpperCAmelCase = 5 , _UpperCAmelCase = 8 , _UpperCAmelCase = 4 , _UpperCAmelCase = 0.0 , _UpperCAmelCase = 1_6 , _UpperCAmelCase = 3 , _UpperCAmelCase = 3 , _UpperCAmelCase = 3 , _UpperCAmelCase = 2 , _UpperCAmelCase = 1 , _UpperCAmelCase = 0.0 , _UpperCAmelCase = 1 , _UpperCAmelCase = True , _UpperCAmelCase = True , _UpperCAmelCase = 1E-5 , _UpperCAmelCase = "gelu" , _UpperCAmelCase = 0.02 , _UpperCAmelCase = 1E-12 , _UpperCAmelCase = 2_2_4 , _UpperCAmelCase = 1E-05 , **_UpperCAmelCase , ) -> None: super().__init__(**_UpperCAmelCase ) __UpperCamelCase : int = hidden_act __UpperCamelCase : Optional[int] = hidden_dropout_prob __UpperCamelCase : List[str] = hidden_sizes __UpperCamelCase : Union[str, Any] = num_hidden_layers __UpperCamelCase : Any = num_attention_heads __UpperCamelCase : Any = initializer_range __UpperCamelCase : Optional[int] = layer_norm_eps __UpperCamelCase : Union[str, Any] = patch_size __UpperCamelCase : Tuple = num_channels __UpperCamelCase : List[str] = depths __UpperCamelCase : Union[str, Any] = mlp_expansion_ratio __UpperCamelCase : Any = downsamples __UpperCamelCase : Optional[int] = dim __UpperCamelCase : Tuple = key_dim __UpperCamelCase : Dict = attention_ratio __UpperCamelCase : str = resolution __UpperCamelCase : Union[str, Any] = pool_size __UpperCamelCase : str = downsample_patch_size 
__UpperCamelCase : List[Any] = downsample_stride __UpperCamelCase : Optional[int] = downsample_pad __UpperCamelCase : Dict = drop_path_rate __UpperCamelCase : Tuple = num_metaad_blocks __UpperCamelCase : Union[str, Any] = distillation __UpperCamelCase : Optional[int] = use_layer_scale __UpperCamelCase : Union[str, Any] = layer_scale_init_value __UpperCamelCase : str = image_size __UpperCamelCase : int = batch_norm_eps
298
'''simple docstring''' import argparse import json import logging import os import sys from unittest.mock import patch from transformers.testing_utils import TestCasePlus, get_gpu_count, slow _lowerCAmelCase = [ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ '''text-classification''', '''language-modeling''', '''summarization''', '''token-classification''', '''question-answering''', ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm_flax import run_flax_glue import run_flax_ner import run_mlm_flax import run_qa import run_summarization_flax import run_ta_mlm_flax logging.basicConfig(level=logging.DEBUG) _lowerCAmelCase = logging.getLogger() def __lowerCAmelCase ( ): __UpperCamelCase : List[Any] = argparse.ArgumentParser() parser.add_argument("-f" ) __UpperCamelCase : Optional[Any] = parser.parse_args() return args.f def __lowerCAmelCase ( snake_case__ , snake_case__="eval" ): __UpperCamelCase : List[str] = os.path.join(snake_case__ , F"{split}_results.json" ) if os.path.exists(snake_case__ ): with open(snake_case__ , "r" ) as f: return json.load(snake_case__ ) raise ValueError(F"can't find {path}" ) _lowerCAmelCase = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def a_ (self ) -> str: __UpperCamelCase : Any = self.get_auto_remove_tmp_dir() __UpperCamelCase : List[str] = f"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split() with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ): run_flax_glue.main() __UpperCamelCase : int = get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) @slow 
def a_ (self ) -> Tuple: __UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir() __UpperCamelCase : Any = f"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split() with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ): run_clm_flax.main() __UpperCamelCase : Optional[int] = get_results(_UpperCAmelCase ) self.assertLess(result["eval_perplexity"] , 1_0_0 ) @slow def a_ (self ) -> str: __UpperCamelCase : Any = self.get_auto_remove_tmp_dir() __UpperCamelCase : Tuple = f"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split() with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ): run_summarization_flax.main() __UpperCamelCase : Tuple = get_results(_UpperCAmelCase , split="test" ) self.assertGreaterEqual(result["test_rouge1"] , 1_0 ) self.assertGreaterEqual(result["test_rouge2"] , 2 ) self.assertGreaterEqual(result["test_rougeL"] , 7 ) self.assertGreaterEqual(result["test_rougeLsum"] , 7 ) @slow def a_ (self ) -> int: __UpperCamelCase : int = self.get_auto_remove_tmp_dir() __UpperCamelCase : str = f"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n 
--overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split() with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ): run_mlm_flax.main() __UpperCamelCase : Optional[Any] = get_results(_UpperCAmelCase ) self.assertLess(result["eval_perplexity"] , 4_2 ) @slow def a_ (self ) -> Dict: __UpperCamelCase : Dict = self.get_auto_remove_tmp_dir() __UpperCamelCase : Tuple = f"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split() with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ): run_ta_mlm_flax.main() __UpperCamelCase : Tuple = get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.42 ) @slow def a_ (self ) -> Union[str, Any]: # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu __UpperCamelCase : Union[str, Any] = 7 if get_gpu_count() > 1 else 2 __UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir() __UpperCamelCase : Optional[Any] = f"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split() with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ): run_flax_ner.main() __UpperCamelCase : int = 
get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) self.assertGreaterEqual(result["eval_f1"] , 0.3 ) @slow def a_ (self ) -> List[Any]: __UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir() __UpperCamelCase : Dict = f"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split() with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ): run_qa.main() __UpperCamelCase : List[Any] = get_results(_UpperCAmelCase ) self.assertGreaterEqual(result["eval_f1"] , 3_0 ) self.assertGreaterEqual(result["eval_exact"] , 3_0 )
298
1
'''simple docstring''' import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class A : '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase=sys.maxsize ) -> int: __UpperCamelCase : Any = "bilinear" __UpperCamelCase : Optional[int] = max_size __UpperCamelCase : Union[str, Any] = short_edge_length def __call__(self , _UpperCAmelCase ) -> int: __UpperCamelCase : Union[str, Any] = [] for img in imgs: __UpperCamelCase , __UpperCamelCase : Dict = img.shape[:2] # later: provide list and randomly choose index for resize __UpperCamelCase : Optional[int] = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img __UpperCamelCase : str = size * 1.0 / min(_UpperCAmelCase , _UpperCAmelCase ) if h < w: __UpperCamelCase , __UpperCamelCase : Any = size, scale * w else: __UpperCamelCase , __UpperCamelCase : Tuple = scale * h, size if max(_UpperCAmelCase , _UpperCAmelCase ) > self.max_size: __UpperCamelCase : Optional[Any] = self.max_size * 1.0 / max(_UpperCAmelCase , _UpperCAmelCase ) __UpperCamelCase : List[str] = newh * scale __UpperCamelCase : Dict = neww * scale __UpperCamelCase : str = int(neww + 0.5 ) __UpperCamelCase : Optional[Any] = int(newh + 0.5 ) if img.dtype == np.uinta: __UpperCamelCase : int = Image.fromarray(_UpperCAmelCase ) __UpperCamelCase : Tuple = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) __UpperCamelCase : Optional[Any] = np.asarray(_UpperCAmelCase ) else: __UpperCamelCase : List[str] = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw __UpperCamelCase : Optional[int] = nn.functional.interpolate( _UpperCAmelCase , (newh, neww) , mode=self.interp_method , align_corners=_UpperCAmelCase ).squeeze(0 ) img_augs.append(_UpperCAmelCase ) return img_augs class A : '''simple docstring''' def __init__(self , _UpperCAmelCase 
) -> List[Any]: __UpperCamelCase : Union[str, Any] = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) __UpperCamelCase : Tuple = cfg.INPUT.FORMAT __UpperCamelCase : Union[str, Any] = cfg.SIZE_DIVISIBILITY __UpperCamelCase : Dict = cfg.PAD_VALUE __UpperCamelCase : Tuple = cfg.INPUT.MAX_SIZE_TEST __UpperCamelCase : Optional[int] = cfg.MODEL.DEVICE __UpperCamelCase : Union[str, Any] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) __UpperCamelCase : Any = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) __UpperCamelCase : Tuple = lambda _UpperCAmelCase : (x - self.pixel_mean) / self.pixel_std def a_ (self , _UpperCAmelCase ) -> List[str]: __UpperCamelCase : Any = tuple(max(_UpperCAmelCase ) for s in zip(*[img.shape for img in images] ) ) __UpperCamelCase : Optional[Any] = [im.shape[-2:] for im in images] __UpperCamelCase : str = [ nn.functional.pad( _UpperCAmelCase , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(_UpperCAmelCase , _UpperCAmelCase ) ] return torch.stack(_UpperCAmelCase ), torch.tensor(_UpperCAmelCase ) def __call__(self , _UpperCAmelCase , _UpperCAmelCase=False ) -> int: with torch.no_grad(): if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): __UpperCamelCase : Optional[int] = [images] if single_image: assert len(_UpperCAmelCase ) == 1 for i in range(len(_UpperCAmelCase ) ): if isinstance(images[i] , torch.Tensor ): images.insert(_UpperCAmelCase , images.pop(_UpperCAmelCase ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( _UpperCAmelCase , torch.as_tensor(img_tensorize(images.pop(_UpperCAmelCase ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest edge __UpperCamelCase : List[str] = torch.tensor([im.shape[:2] for im in images] ) __UpperCamelCase : Tuple = self.aug(_UpperCAmelCase 
) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic __UpperCamelCase : str = [self.normalizer(_UpperCAmelCase ) for x in images] # now pad them to do the following operations __UpperCamelCase , __UpperCamelCase : Union[str, Any] = self.pad(_UpperCAmelCase ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad __UpperCamelCase : List[Any] = torch.true_divide(_UpperCAmelCase , _UpperCAmelCase ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def __lowerCAmelCase ( snake_case__ , snake_case__ ): boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def __lowerCAmelCase ( snake_case__ , snake_case__ ): assert torch.isfinite(snake_case__ ).all(), "Box tensor contains infinite or NaN!" __UpperCamelCase , __UpperCamelCase : List[str] = box_size tensor[:, 0].clamp_(min=0 , max=snake_case__ ) tensor[:, 1].clamp_(min=0 , max=snake_case__ ) tensor[:, 2].clamp_(min=0 , max=snake_case__ ) tensor[:, 3].clamp_(min=0 , max=snake_case__ )
298
'''simple docstring''' import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class A : '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase=9_9 , _UpperCAmelCase=1_3 , _UpperCAmelCase=1_6 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=2 , _UpperCAmelCase=3_2 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase=3_0 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=None , ) -> int: __UpperCamelCase : List[str] = parent __UpperCamelCase : str = batch_size __UpperCamelCase : str = decoder_seq_length # For common tests __UpperCamelCase : Optional[int] = self.decoder_seq_length __UpperCamelCase : Any = is_training __UpperCamelCase : Tuple = use_attention_mask __UpperCamelCase : Optional[int] = use_labels __UpperCamelCase : Dict = vocab_size __UpperCamelCase : Optional[int] = d_model __UpperCamelCase : Union[str, Any] = d_model __UpperCamelCase : int = decoder_layers __UpperCamelCase : Dict = decoder_layers __UpperCamelCase : str = decoder_ffn_dim __UpperCamelCase : Optional[Any] = decoder_attention_heads __UpperCamelCase : Optional[Any] = decoder_attention_heads __UpperCamelCase : List[Any] = eos_token_id __UpperCamelCase : int = bos_token_id __UpperCamelCase : Tuple = pad_token_id __UpperCamelCase : Tuple = decoder_start_token_id __UpperCamelCase : Dict = use_cache __UpperCamelCase : Optional[Any] = max_position_embeddings __UpperCamelCase : int = None 
__UpperCamelCase : Optional[int] = decoder_seq_length __UpperCamelCase : Optional[int] = 2 __UpperCamelCase : Optional[int] = 1 def a_ (self ) -> List[Any]: __UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) __UpperCamelCase : int = None if self.use_attention_mask: __UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 ) __UpperCamelCase : List[str] = None if self.use_labels: __UpperCamelCase : int = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) __UpperCamelCase : Optional[Any] = TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> Optional[Any]: __UpperCamelCase : List[Any] = True __UpperCamelCase : Optional[Any] = TrOCRDecoder(config=_UpperCAmelCase ).to(_UpperCAmelCase ).eval() __UpperCamelCase : Optional[Any] = input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass __UpperCamelCase : str = model(_UpperCAmelCase , use_cache=_UpperCAmelCase ) __UpperCamelCase : List[Any] = model(_UpperCAmelCase ) __UpperCamelCase : Optional[int] = model(_UpperCAmelCase , use_cache=_UpperCAmelCase ) self.parent.assertTrue(len(_UpperCAmelCase ) == len(_UpperCAmelCase ) ) self.parent.assertTrue(len(_UpperCAmelCase ) == len(_UpperCAmelCase ) + 1 ) __UpperCamelCase : List[Any] = outputs["past_key_values"] # create hypothetical next token and extent to next_input_ids __UpperCamelCase : Optional[int] = ids_tensor((2, 1) , 
config.vocab_size - 1 ) + 1 # append to next input_ids and __UpperCamelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 ) __UpperCamelCase : Tuple = model(_UpperCAmelCase )["last_hidden_state"] __UpperCamelCase : Any = model(_UpperCAmelCase , past_key_values=_UpperCAmelCase )["last_hidden_state"] # select random slice __UpperCamelCase : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item() __UpperCamelCase : Dict = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() __UpperCamelCase : Optional[int] = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 ) def a_ (self ) -> Optional[Any]: __UpperCamelCase : List[str] = self.prepare_config_and_inputs() __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Any = config_and_inputs __UpperCamelCase : str = {"input_ids": input_ids, "attention_mask": attention_mask} return config, inputs_dict @require_torch class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): '''simple docstring''' A = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () A = (TrOCRForCausalLM,) if is_torch_available() else () A = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {} A = True A = False def a_ (self ) -> List[str]: __UpperCamelCase : Optional[int] = TrOCRStandaloneDecoderModelTester(self , is_training=_UpperCAmelCase ) __UpperCamelCase : Dict = ConfigTester(self , config_class=_UpperCAmelCase ) def a_ (self ) -> Dict: pass def a_ (self ) -> Optional[int]: pass def a_ (self ) -> Optional[Any]: pass def a_ (self ) -> Dict: self.config_tester.run_common_tests() def a_ (self ) -> List[Any]: __UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*_UpperCAmelCase ) def a_ (self ) -> Any: return @unittest.skip("The model 
doesn't support left padding" ) # and it's not used enough to be worth fixing :) def a_ (self ) -> Tuple: pass
298
1
"""GIT model configuration: a CLIP-style vision tower plus a text decoder.

De-obfuscated: the two classes were both named ``A`` (the second shadowed the
first) while ``GitConfig.__init__`` called the then-undefined name
``GitVisionConfig``; every ``__init__`` parameter had been renamed to the same
identifier (a SyntaxError). Names are restored from the in-file references.
"""
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    """Configuration for the vision encoder used by GIT.

    Defaults reproduce the vision tower of the ``microsoft/git-base``
    checkpoint (grounded in the default values visible in the source).
    """

    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load the vision config, unwrapping it from a full GIT config if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # When loading from a full GIT checkpoint, pick out the nested vision section.
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    """Configuration for the full GIT model (text decoder conditioned on vision features)."""

    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        # Special-token ids are also forwarded to the base class so generation
        # utilities pick them up.
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested vision config."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
298
'''simple docstring''' import argparse from pathlib import Path import fairseq import torch from fairseq.models.xmod import XMODModel as FairseqXmodModel from packaging import version from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse('''0.12.2'''): raise Exception('''requires fairseq >= 0.12.2''') if version.parse(fairseq.__version__) > version.parse('''2'''): raise Exception('''requires fairseq < v2''') logging.set_verbosity_info() _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = '''Hello, World!''' _lowerCAmelCase = '''en_XX''' def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ): __UpperCamelCase : Union[str, Any] = Path("data_bin" ) __UpperCamelCase : Union[str, Any] = FairseqXmodModel.from_pretrained( model_name_or_path=str(Path(snake_case__ ).parent ) , checkpoint_file=Path(snake_case__ ).name , _name="xmod_base" , arch="xmod_base" , task="multilingual_masked_lm" , data_name_or_path=str(snake_case__ ) , bpe="sentencepiece" , sentencepiece_model=str(Path(snake_case__ ).parent / "sentencepiece.bpe.model" ) , src_dict=str(data_dir / "dict.txt" ) , ) xmod.eval() # disable dropout print(snake_case__ ) __UpperCamelCase : List[str] = xmod.model.encoder.sentence_encoder __UpperCamelCase : Optional[int] = XmodConfig( vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , 
ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , ) if classification_head: __UpperCamelCase : Any = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0] print("Our X-MOD config:" , snake_case__ ) __UpperCamelCase : Dict = XmodForSequenceClassification(snake_case__ ) if classification_head else XmodForMaskedLM(snake_case__ ) model.eval() # Now let's copy all the weights. # Embeddings __UpperCamelCase : List[Any] = xmod_sent_encoder.embed_tokens.weight __UpperCamelCase : List[Any] = xmod_sent_encoder.embed_positions.weight __UpperCamelCase : str = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them. __UpperCamelCase : Any = xmod_sent_encoder.layernorm_embedding.weight __UpperCamelCase : str = xmod_sent_encoder.layernorm_embedding.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer __UpperCamelCase : int = model.roberta.encoder.layer[i] __UpperCamelCase : Any = xmod_sent_encoder.layers[i] # self attention __UpperCamelCase : List[str] = layer.attention.self if not ( xmod_layer.self_attn.k_proj.weight.data.shape == xmod_layer.self_attn.q_proj.weight.data.shape == xmod_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ): raise AssertionError("Dimensions of self-attention weights do not match." 
) __UpperCamelCase : Dict = xmod_layer.self_attn.q_proj.weight __UpperCamelCase : Optional[Any] = xmod_layer.self_attn.q_proj.bias __UpperCamelCase : Any = xmod_layer.self_attn.k_proj.weight __UpperCamelCase : Tuple = xmod_layer.self_attn.k_proj.bias __UpperCamelCase : Union[str, Any] = xmod_layer.self_attn.v_proj.weight __UpperCamelCase : Any = xmod_layer.self_attn.v_proj.bias # self-attention output __UpperCamelCase : Optional[int] = layer.attention.output if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape: raise AssertionError("Dimensions of self-attention output weights do not match." ) __UpperCamelCase : Union[str, Any] = xmod_layer.self_attn.out_proj.weight __UpperCamelCase : str = xmod_layer.self_attn.out_proj.bias __UpperCamelCase : Dict = xmod_layer.self_attn_layer_norm.weight __UpperCamelCase : Any = xmod_layer.self_attn_layer_norm.bias # intermediate __UpperCamelCase : Dict = layer.intermediate if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError("Dimensions of intermediate weights do not match." ) __UpperCamelCase : List[Any] = xmod_layer.fca.weight __UpperCamelCase : Optional[int] = xmod_layer.fca.bias # output __UpperCamelCase : List[Any] = layer.output if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError("Dimensions of feed-forward weights do not match." ) __UpperCamelCase : Tuple = xmod_layer.fca.weight __UpperCamelCase : int = xmod_layer.fca.bias __UpperCamelCase : Dict = xmod_layer.final_layer_norm.weight __UpperCamelCase : int = xmod_layer.final_layer_norm.bias if bert_output.adapter_layer_norm is not None: __UpperCamelCase : Any = xmod_layer.adapter_layer_norm.weight __UpperCamelCase : int = xmod_layer.adapter_layer_norm.bias if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ): raise AssertionError("Lists of language adapters do not match." 
) for lang_code, adapter in xmod_layer.adapter_modules.items(): __UpperCamelCase : Any = bert_output.adapter_modules[lang_code] __UpperCamelCase : Dict = xmod_layer.adapter_modules[lang_code] __UpperCamelCase : int = from_adapter.fca.weight __UpperCamelCase : Dict = from_adapter.fca.bias __UpperCamelCase : List[Any] = from_adapter.fca.weight __UpperCamelCase : int = from_adapter.fca.bias # end of layer if xmod_sent_encoder.layer_norm is not None: __UpperCamelCase : Tuple = xmod_sent_encoder.layer_norm.weight __UpperCamelCase : List[Any] = xmod_sent_encoder.layer_norm.bias if classification_head: __UpperCamelCase : Optional[Any] = xmod.model.classification_heads["mnli"].dense.weight __UpperCamelCase : Any = xmod.model.classification_heads["mnli"].dense.bias __UpperCamelCase : Tuple = xmod.model.classification_heads["mnli"].out_proj.weight __UpperCamelCase : List[Any] = xmod.model.classification_heads["mnli"].out_proj.bias else: # LM Head __UpperCamelCase : Any = xmod.model.encoder.lm_head.dense.weight __UpperCamelCase : Optional[Any] = xmod.model.encoder.lm_head.dense.bias __UpperCamelCase : Tuple = xmod.model.encoder.lm_head.layer_norm.weight __UpperCamelCase : List[Any] = xmod.model.encoder.lm_head.layer_norm.bias __UpperCamelCase : Tuple = xmod.model.encoder.lm_head.weight __UpperCamelCase : Any = xmod.model.encoder.lm_head.bias # Let's check that we get the same results. 
__UpperCamelCase : Any = xmod.encode(snake_case__ ).unsqueeze(0 ) # batch of size 1 model.roberta.set_default_language(snake_case__ ) __UpperCamelCase : Optional[Any] = model(snake_case__ )[0] if classification_head: __UpperCamelCase : int = xmod.model.classification_heads["mnli"](xmod.extract_features(snake_case__ ) ) else: __UpperCamelCase : Optional[Any] = xmod.model(snake_case__ , lang_id=[SAMPLE_LANGUAGE] )[0] print(our_output.shape , their_output.shape ) __UpperCamelCase : Dict = torch.max(torch.abs(our_output - their_output ) ).item() print(F"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7 __UpperCamelCase : Union[str, Any] = torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) print("Do both models output the same tensors?" , "🔥" if success else "💩" ) if not success: raise Exception("Something went wRoNg" ) Path(snake_case__ ).mkdir(parents=snake_case__ , exist_ok=snake_case__ ) print(F"Saving model to {pytorch_dump_folder_path}" ) model.save_pretrained(snake_case__ ) if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.''' ) _lowerCAmelCase = parser.parse_args() convert_xmod_checkpoint_to_pytorch( args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
298
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''} class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' A = "ctrl" A = ["past_key_values"] A = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__(self , _UpperCAmelCase=2_4_6_5_3_4 , _UpperCAmelCase=2_5_6 , _UpperCAmelCase=1_2_8_0 , _UpperCAmelCase=8_1_9_2 , _UpperCAmelCase=4_8 , _UpperCAmelCase=1_6 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1E-6 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , **_UpperCAmelCase , ) -> Dict: __UpperCamelCase : Union[str, Any] = vocab_size __UpperCamelCase : Optional[Any] = n_positions __UpperCamelCase : Tuple = n_embd __UpperCamelCase : Optional[int] = n_layer __UpperCamelCase : Any = n_head __UpperCamelCase : List[Any] = dff __UpperCamelCase : List[str] = resid_pdrop __UpperCamelCase : Union[str, Any] = embd_pdrop __UpperCamelCase : Any = layer_norm_epsilon __UpperCamelCase : Optional[int] = initializer_range __UpperCamelCase : Optional[Any] = use_cache super().__init__(**_UpperCAmelCase )
298
'''simple docstring''' def __lowerCAmelCase ( snake_case__ ): return [ txt[:a] + txt[a].upper() + txt[a + 1 :] for a in range(len(snake_case__ ) ) if txt[a].isalpha() ] if __name__ == "__main__": __import__('''doctest''').testmod()
298
1
"""Download "class images" for DreamBooth prior preservation via the LAION knn service.

De-obfuscated: both functions were named ``__lowerCAmelCase`` with triplicated
``snake_case__`` parameters (duplicate parameter names — a SyntaxError), while
the ``__main__`` block called the then-undefined names ``parse_args``/``retrieve``
and read ``args``. Names restored from those call sites.
"""
import argparse
import os
from io import BytesIO
from pathlib import Path

import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm


def retrieve(class_prompt, class_data_dir, num_class_images):
    """Query the LAION-400M knn index for *class_prompt* and save up to
    *num_class_images* images (plus their captions/urls) under *class_data_dir*.

    No-op if the image directory already holds enough files.
    """
    # Over-fetch by 50% so that broken/4xx urls still leave enough usable hits.
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    # Grow the request size until the index returns enough candidates
    # (capped at 10k to avoid hammering the service).
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as fa, open(f"{class_data_dir}/urls.txt", "w") as fa_urls, open(
        f"{class_data_dir}/images.txt", "w"
    ) as fa_paths:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    # Decode once to verify the payload really is an image
                    # before committing it to disk.
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    fa.write(images["caption"] + "\n")
                    fa_urls.write(images["url"] + "\n")
                    fa_paths.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                # Best-effort download: skip any candidate that fails to fetch
                # or decode and move on to the next hit.
                continue
    return


def parse_args():
    """Parse the three CLI options consumed by :func:`retrieve`."""
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
298
"""Combination Sum IV: count ordered combinations of array elements (with
repetition) that sum to a target.

The original file was identifier-mangled (duplicate ``snake_case__``
parameters are a SyntaxError; assignment targets were all renamed to
``__UpperCamelCase`` while reads kept the real names), so the real names are
restored here; the ``__main__`` call site (``combination_sum_iv(n, array,
target)``) evidences the intended public name.
"""


def combination_sum_iv(n, array, target):
    """Count ordered sequences of elements of ``array`` (repeats allowed)
    summing to ``target``, via plain recursion (exponential time).

    ``n`` is the length of ``array`` (kept for signature parity with the
    iterative variant).

    >>> combination_sum_iv(3, [1, 2, 5], 5)
    9
    """

    def count_of_possible_combinations(target):
        if target < 0:
            return 0
        if target == 0:
            return 1  # one way: the empty remainder
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n, array, target):
    """Same count as :func:`combination_sum_iv`, memoized on the remaining
    target (O(n * target) time).

    >>> combination_sum_iv_dp_array(3, [1, 2, 5], 5)
    9
    """

    def count_of_possible_combinations_with_dp_array(target, dp_array):
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:  # already computed
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n, array, target):
    """Same count, computed bottom-up without recursion.

    >>> combination_sum_iv_bottom_up(3, [1, 2, 5], 5)
    9
    """
    dp_array = [0] * (target + 1)
    dp_array[0] = 1  # empty combination reaches sum 0
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
298
1
'''Quine-McCluskey minimisation of boolean functions (prime implicants and
essential prime implicant selection).'''
# NOTE(review): identifier-mangling broke this file: duplicate `snake_case__`
# parameters are a SyntaxError, assignment targets were rewritten to
# `__UpperCamelCase`, and some *reads* were also mangled (e.g. `lista[i] !=
# lista[i]`, `checka`), so the code cannot run as written. Comments name the
# evident original function per upstream TheAlgorithms `quine_mc_cluskey.py`;
# the code itself is left token-for-token intact.
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def __lowerCAmelCase ( snake_case__ , snake_case__ ):
    # Evidently compare_string(string1, string2): if the two bit-strings differ
    # in exactly one position, return the merge with '_' at that position;
    # otherwise return False.
    __UpperCamelCase : int = list(snake_case__ )
    __UpperCamelCase : List[str] = list(snake_case__ )
    __UpperCamelCase : str = 0
    for i in range(len(snake_case__ ) ):
        # NOTE(review): `lista[i] != lista[i]` compares a value with itself --
        # presumably the original compared the two input lists.
        if lista[i] != lista[i]:
            count += 1
            __UpperCamelCase : Any = "_"
    if count > 1:
        return False
    else:
        return "".join(snake_case__ )


def __lowerCAmelCase ( snake_case__ ):
    # Evidently check(binary): repeatedly merge compatible minterms until no
    # merge is possible, collecting the prime implicants in `pi`.
    __UpperCamelCase : Tuple = []
    while True:
        __UpperCamelCase : Optional[int] = ["$"] * len(snake_case__ )
        __UpperCamelCase : Union[str, Any] = []
        for i in range(len(snake_case__ ) ):
            for j in range(i + 1 , len(snake_case__ ) ):
                __UpperCamelCase : Tuple = compare_string(binary[i] , binary[j] )
                if k is False:
                    __UpperCamelCase : Any = "*"
                    __UpperCamelCase : str = "*"
                    temp.append("X" )
        # Terms never marked merged ('$' untouched) are prime implicants.
        for i in range(len(snake_case__ ) ):
            if checka[i] == "$":
                pi.append(binary[i] )
        if len(snake_case__ ) == 0:
            return pi
        __UpperCamelCase : int = list(set(snake_case__ ) )


def __lowerCAmelCase ( snake_case__ , snake_case__ ):
    # Evidently decimal_to_binary(no_of_variable, minterms): render each
    # minterm as a fixed-width binary string (LSB-first accumulation).
    __UpperCamelCase : Optional[int] = []
    for minterm in minterms:
        __UpperCamelCase : Optional[Any] = ""
        for _ in range(snake_case__ ):
            __UpperCamelCase : str = str(minterm % 2 ) + string
            minterm //= 2
        temp.append(snake_case__ )
    return temp


def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
    # Evidently is_for_table(string1, string2, count): True when the two
    # strings differ in exactly `count` positions (i.e. the implicant covers
    # the minterm, '_' wildcards accounting for the differences).
    __UpperCamelCase : Tuple = list(snake_case__ )
    __UpperCamelCase : Optional[Any] = list(snake_case__ )
    __UpperCamelCase : Optional[int] = 0
    for i in range(len(snake_case__ ) ):
        # NOTE(review): same self-comparison mangling as in compare_string.
        if lista[i] != lista[i]:
            count_n += 1
    return count_n == count


def __lowerCAmelCase ( snake_case__ , snake_case__ ):
    # Evidently selection(chart, prime_implicants): pick essential prime
    # implicants (columns covered by exactly one row), then greedily add the
    # row covering the most remaining columns until the chart is empty.
    __UpperCamelCase : Dict = []
    __UpperCamelCase : int = [0] * len(snake_case__ )
    for i in range(len(chart[0] ) ):
        __UpperCamelCase : Union[str, Any] = 0
        __UpperCamelCase : Union[str, Any] = -1
        for j in range(len(snake_case__ ) ):
            if chart[j][i] == 1:
                count += 1
                __UpperCamelCase : Any = j
        if count == 1:
            __UpperCamelCase : List[str] = 1
    # Take every essential implicant and clear the columns it covers.
    for i in range(len(snake_case__ ) ):
        if select[i] == 1:
            for j in range(len(chart[0] ) ):
                if chart[i][j] == 1:
                    for k in range(len(snake_case__ ) ):
                        __UpperCamelCase : Dict = 0
            temp.append(prime_implicants[i] )
    # Greedy cover of whatever columns remain.
    while True:
        __UpperCamelCase : Optional[Any] = 0
        __UpperCamelCase : Dict = -1
        __UpperCamelCase : List[str] = 0
        for i in range(len(snake_case__ ) ):
            __UpperCamelCase : List[Any] = chart[i].count(1 )
            if count_n > max_n:
                __UpperCamelCase : Tuple = count_n
                __UpperCamelCase : str = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem] )
        for i in range(len(chart[0] ) ):
            if chart[rem][i] == 1:
                for j in range(len(snake_case__ ) ):
                    __UpperCamelCase : Optional[Any] = 0


def __lowerCAmelCase ( snake_case__ , snake_case__ ):
    # Evidently prime_implicant_chart(prime_implicants, binary): build the
    # coverage matrix chart[i][j] == 1 when implicant i covers minterm j.
    __UpperCamelCase : Any = [[0 for x in range(len(snake_case__ ) )] for x in range(len(snake_case__ ) )]
    for i in range(len(snake_case__ ) ):
        __UpperCamelCase : List[str] = prime_implicants[i].count("_" )
        for j in range(len(snake_case__ ) ):
            if is_for_table(prime_implicants[i] , binary[j] , snake_case__ ):
                __UpperCamelCase : Optional[int] = 1
    return chart


def __lowerCAmelCase ( ):
    # Evidently main(): interactive driver.
    __UpperCamelCase : str = int(input("Enter the no. of variables\n" ) )
    __UpperCamelCase : str = [
        float(snake_case__ )
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    __UpperCamelCase : Tuple = decimal_to_binary(snake_case__ , snake_case__ )
    __UpperCamelCase : int = check(snake_case__ )
    print("Prime Implicants are:" )
    print(snake_case__ )
    __UpperCamelCase : List[str] = prime_implicant_chart(snake_case__ , snake_case__ )
    __UpperCamelCase : Tuple = selection(snake_case__ , snake_case__ )
    print("Essential Prime Implicants are:" )
    print(snake_case__ )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
298
"""Test-directory pytest configuration: make the in-repo ``src`` importable,
silence FutureWarnings, and register the shared transformers test options and
report hooks."""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# Bug fix: the path was assigned to a mangled name (`_lowerCAmelCase`) while
# being read as `git_repo_path`, which raised NameError at import time.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    """pytest hook: register transformers' shared command-line options.

    Bug fix: the hook names had been mangled (``__lowerCAmelCase``), so pytest
    never discovered them; the protocol names are restored.
    """
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """pytest hook: emit the extra test reports when ``--make-reports`` is set."""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
298
1
"""I-BERT (integer-only BERT) model configuration.

Reconstruction note: the obfuscated original declared both classes as ``A``
with duplicated ``_UpperCAmelCase`` keyword parameters (a SyntaxError) and an
undefined base ``SCREAMING_SNAKE_CASE__``; the conventional transformers names
are restored, keeping every default value visible in the original.
"""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class IBertConfig(PretrainedConfig):
    """Configuration class for an I-BERT model.

    Args mirror BERT/RoBERTa configs; the I-BERT-specific knobs are
    ``quant_mode`` (run the model in integer-only quantized mode) and
    ``force_dequant`` (selectively dequantize given operations).
    ``pad_token_id=1``/``bos=0``/``eos=2`` follow the RoBERTa convention.
    """

    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    """ONNX export configuration for I-BERT."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # multiple-choice adds a per-choice axis between batch and sequence
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
298
'''Tests for the BridgeTower image processor.'''
# NOTE(review): identifier-mangling broke this file: every method is named
# `a_` (later defs shadow earlier ones), all parameters are `_UpperCAmelCase`
# (duplicates are a SyntaxError), the mixin base was replaced by the undefined
# `SCREAMING_SNAKE_CASE__` (presumably ImageProcessingSavingTestMixin -- TODO
# confirm), and assignment targets are `__UpperCamelCase` while reads keep the
# original names. The code is left token-for-token intact; comments document
# the evident intent.
import unittest
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import BridgeTowerImageProcessor


class A ( unittest.TestCase ):
    '''Helper that holds image-processor kwargs and computes the resize
    shapes the processor is expected to produce (evidently
    BridgeTowerImageProcessingTester -- see the read in the test class).'''

    def __init__(self , _UpperCAmelCase , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = 3_2 , _UpperCAmelCase = True , _UpperCAmelCase = 1 / 2_5_5 , _UpperCAmelCase = True , _UpperCAmelCase = True , _UpperCAmelCase = [0.48_145_466, 0.4_578_275, 0.40_821_073] , _UpperCAmelCase = [0.26_862_954, 0.26_130_258, 0.27_577_711] , _UpperCAmelCase = True , _UpperCAmelCase=7 , _UpperCAmelCase=3_0 , _UpperCAmelCase=4_0_0 , _UpperCAmelCase=3 , ) -> Dict:
        # Evident originals: parent, do_resize, size, size_divisor, do_rescale,
        # rescale_factor, do_normalize, do_center_crop, image_mean, image_std,
        # do_pad, batch_size, num_channels, min_resolution, max_resolution.
        __UpperCamelCase : Dict = parent
        __UpperCamelCase : Any = do_resize
        __UpperCamelCase : Union[str, Any] = size if size is not None else {"shortest_edge": 2_8_8}
        __UpperCamelCase : Any = size_divisor
        __UpperCamelCase : Optional[int] = do_rescale
        __UpperCamelCase : Union[str, Any] = rescale_factor
        __UpperCamelCase : int = do_normalize
        __UpperCamelCase : List[Any] = do_center_crop
        __UpperCamelCase : Optional[int] = image_mean
        __UpperCamelCase : Tuple = image_std
        __UpperCamelCase : Tuple = do_pad
        __UpperCamelCase : Tuple = batch_size
        __UpperCamelCase : Dict = num_channels
        __UpperCamelCase : Dict = min_resolution
        __UpperCamelCase : Optional[Any] = max_resolution

    def a_ (self ) -> Optional[int]:
        # Evidently prepare_image_processor_dict(): kwargs for the processor.
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase=False ) -> Optional[Any]:
        # Evidently get_expected_values(image_inputs, batched=False): replicate
        # the processor's shortest-edge resize (with 1333/800 max-size cap and
        # rounding down to a multiple of size_divisor) to predict output H/W.
        if not batched:
            __UpperCamelCase : List[str] = self.size["shortest_edge"]
            __UpperCamelCase : Optional[int] = image_inputs[0]
            if isinstance(_UpperCAmelCase , Image.Image ):
                __UpperCamelCase , __UpperCamelCase : Optional[Any] = image.size
            else:
                __UpperCamelCase , __UpperCamelCase : Union[str, Any] = image.shape[1], image.shape[2]
            __UpperCamelCase : Dict = size / min(_UpperCAmelCase , _UpperCAmelCase )
            if h < w:
                __UpperCamelCase , __UpperCamelCase : Tuple = size, scale * w
            else:
                __UpperCamelCase , __UpperCamelCase : List[Any] = scale * h, size

            # cap the longer side at (1333/800) * shortest_edge
            __UpperCamelCase : List[Any] = int((1_3_3_3 / 8_0_0) * size )
            if max(_UpperCAmelCase , _UpperCAmelCase ) > max_size:
                __UpperCamelCase : str = max_size / max(_UpperCAmelCase , _UpperCAmelCase )
                __UpperCamelCase : Dict = newh * scale
                __UpperCamelCase : Union[str, Any] = neww * scale

            __UpperCamelCase , __UpperCamelCase : Optional[int] = int(newh + 0.5 ), int(neww + 0.5 )
            __UpperCamelCase , __UpperCamelCase : Optional[int] = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            # batched: per-image expected sizes, padded up to the batch max
            __UpperCamelCase : int = []
            for image in image_inputs:
                __UpperCamelCase , __UpperCamelCase : Optional[Any] = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            __UpperCamelCase : Tuple = max(_UpperCAmelCase , key=lambda _UpperCAmelCase : item[0] )[0]
            __UpperCamelCase : Union[str, Any] = max(_UpperCAmelCase , key=lambda _UpperCAmelCase : item[1] )[1]

        return expected_height, expected_width


@require_torch
@require_vision
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    '''Evidently BridgeTowerImageProcessingTest: exercises the processor on
    PIL, numpy and torch inputs, batched and unbatched.'''

    # evidently `image_processing_class` (mangled to `A`)
    A = BridgeTowerImageProcessor if is_vision_available() else None

    def a_ (self ) -> Dict:
        # Evidently setUp().
        __UpperCamelCase : Optional[Any] = BridgeTowerImageProcessingTester(self )

    @property
    def a_ (self ) -> Optional[int]:
        # Evidently image_processor_dict property.
        return self.image_processor_tester.prepare_image_processor_dict()

    def a_ (self ) -> Union[str, Any]:
        # Evidently test_image_processor_properties().
        __UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_UpperCAmelCase , "image_mean" ) )
        self.assertTrue(hasattr(_UpperCAmelCase , "image_std" ) )
        self.assertTrue(hasattr(_UpperCAmelCase , "do_normalize" ) )
        self.assertTrue(hasattr(_UpperCAmelCase , "do_resize" ) )
        self.assertTrue(hasattr(_UpperCAmelCase , "size" ) )
        self.assertTrue(hasattr(_UpperCAmelCase , "size_divisor" ) )

    def a_ (self ) -> List[str]:
        pass

    def a_ (self ) -> List[Any]:
        # Initialize image processor
        __UpperCamelCase : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        __UpperCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCAmelCase , Image.Image )

        # Test not batched input
        __UpperCamelCase : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        __UpperCamelCase , __UpperCamelCase : List[str] = self.image_processor_tester.get_expected_values(_UpperCAmelCase )
        self.assertEqual(
            encoded_images.shape ,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width) ,
        )

        # Test batched
        __UpperCamelCase : Optional[int] = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values
        __UpperCamelCase , __UpperCamelCase : List[str] = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) ,
        )

    def a_ (self ) -> Tuple:
        # Initialize image processor
        __UpperCamelCase : str = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        __UpperCamelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCAmelCase , np.ndarray )

        # Test not batched input
        __UpperCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        __UpperCamelCase , __UpperCamelCase : Optional[Any] = self.image_processor_tester.get_expected_values(_UpperCAmelCase )
        self.assertEqual(
            encoded_images.shape ,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width) ,
        )

        # Test batched
        __UpperCamelCase : List[Any] = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values
        __UpperCamelCase , __UpperCamelCase : int = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) ,
        )

    def a_ (self ) -> int:
        # Initialize image processor
        __UpperCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        __UpperCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCAmelCase , torch.Tensor )

        # Test not batched input
        __UpperCamelCase : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        __UpperCamelCase , __UpperCamelCase : int = self.image_processor_tester.get_expected_values(_UpperCAmelCase )
        self.assertEqual(
            encoded_images.shape ,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width) ,
        )

        # Test batched
        __UpperCamelCase : Optional[Any] = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values
        __UpperCamelCase , __UpperCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) ,
        )
298
1
"""Divide-and-conquer computation of a longest non-decreasing subsequence."""
from __future__ import annotations


def longest_subsequence(array: list[int]) -> list[int]:
    """Return some longest non-decreasing subsequence of ``array``.

    Reconstruction note: the obfuscated original assigned every local to the
    mangled name ``__UpperCamelCase`` while reading the real names
    (``array_length``, ``pivot``, ``is_found``, ...), and its recursive call
    already named ``longest_subsequence`` -- the working form is restored.

    >>> longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80])
    [10, 22, 33, 41, 60, 80]
    >>> longest_subsequence([1, 1, 1])
    [1, 1, 1]
    >>> longest_subsequence([])
    []
    """
    # This function is recursive.
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop
    # condition of recursion).
    if array_length <= 1:
        return array

    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    # Best subsequence that *excludes* the pivot, anchored at the first
    # element smaller than it.
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    # Best subsequence that *includes* the pivot.
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq


if __name__ == "__main__":
    import doctest

    doctest.testmod()
298
'''Convert an official (Gluonnlp/MXNet) Bort checkpoint to a Hugging Face
PyTorch BERT checkpoint and verify the two models agree.'''
# NOTE(review): identifier-mangling broke this script: the conversion function
# has duplicate `snake_case__` parameters (SyntaxError), and all assignment
# targets were rewritten to `__UpperCamelCase` / `_lowerCAmelCase` while reads
# keep the original names (predefined_args, encoder, vocab_name, original_bort,
# params, hf_bort_config, hf_bort_model, tokenizer, input_ids, success, ...).
# The code is left token-for-token intact; comments document the evident
# intent. The `__main__` call site evidences the original entry-point name
# `convert_bort_checkpoint_to_pytorch(bort_checkpoint_path,
# pytorch_dump_folder_path)`.
import argparse
import os

import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn

from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.utils import logging


# Conversion is pinned to the exact library versions the original Bort used.
if version.parse(nlp.__version__) != version.parse("0.8.3"):
    raise Exception("requires gluonnlp == 0.8.3")

if version.parse(mx.__version__) != version.parse("1.5.0"):
    raise Exception("requires mxnet == 1.5.0")

logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)

# Sample sentence used at the end to compare the two models' outputs.
_lowerCAmelCase = "The Nymphenburg Palace is a beautiful palace in Munich!"


def __lowerCAmelCase ( snake_case__ , snake_case__ ):
    # Hyper-parameters of the bort_4_8_768_1024 architecture.
    __UpperCamelCase : List[Any] = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1_024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1_024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1E-5,
        "token_type_vocab_size": 2,
    }
    __UpperCamelCase : Optional[int] = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    __UpperCamelCase : Any = BERTEncoder(
        attention_cell=predefined_args["attention_cell"] ,
        num_layers=predefined_args["num_layers"] ,
        units=predefined_args["units"] ,
        hidden_size=predefined_args["hidden_size"] ,
        max_length=predefined_args["max_length"] ,
        num_heads=predefined_args["num_heads"] ,
        scaled=predefined_args["scaled"] ,
        dropout=predefined_args["dropout"] ,
        output_attention=snake_case__ ,
        output_all_encodings=snake_case__ ,
        use_residual=predefined_args["use_residual"] ,
        activation=predefined_args.get("activation" , "gelu" ) ,
        layer_norm_eps=predefined_args.get("layer_norm_eps" , snake_case__ ) ,
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    __UpperCamelCase : str = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    __UpperCamelCase : Tuple = os.path.join(get_home_dir() , "models" )
    __UpperCamelCase : Union[str, Any] = _load_vocab(snake_case__ , snake_case__ , snake_case__ , cls=snake_case__ )

    __UpperCamelCase : Union[str, Any] = nlp.model.BERTModel(
        snake_case__ ,
        len(snake_case__ ) ,
        units=predefined_args["units"] ,
        embed_size=predefined_args["embed_size"] ,
        embed_dropout=predefined_args["embed_dropout"] ,
        word_embed=predefined_args["word_embed"] ,
        use_pooler=snake_case__ ,
        use_token_type_embed=snake_case__ ,
        token_type_vocab_size=predefined_args["token_type_vocab_size"] ,
        use_classifier=snake_case__ ,
        use_decoder=snake_case__ ,
    )

    original_bort.load_parameters(snake_case__ , cast_dtype=snake_case__ , ignore_extra=snake_case__ )
    __UpperCamelCase : int = original_bort._collect_params_with_prefix()

    # Build our config 🤗
    __UpperCamelCase : Any = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(snake_case__ ),
    }

    __UpperCamelCase : List[str] = BertConfig.from_dict(snake_case__ )
    __UpperCamelCase : str = BertForMaskedLM(snake_case__ )
    hf_bort_model.eval()

    # Parameter mapping table (Gluonnlp to Transformers)
    # * denotes layer index
    #
    # | Gluon Parameter | Transformers Parameter
    # | -------------------------------------------------------------- | ----------------------
    # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
    # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
    # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
    # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
    # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
    # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
    # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`

    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(snake_case__ ) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )

    # Check param shapes and map new HF param back
    def check_and_map_params(snake_case__ , snake_case__ ):
        __UpperCamelCase : Any = hf_param.shape

        __UpperCamelCase : List[Any] = to_torch(params[gluon_param] )
        __UpperCamelCase : Union[str, Any] = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), F"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param

    # --- Embeddings ---
    __UpperCamelCase : Tuple = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight , "word_embed.0.weight" )
    __UpperCamelCase : str = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight , "encoder.position_weight" )
    __UpperCamelCase : Optional[int] = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias , "encoder.layer_norm.beta" )
    __UpperCamelCase : str = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight , "encoder.layer_norm.gamma" )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    __UpperCamelCase : Any = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )

    # --- Encoder layers ---
    for i in range(hf_bort_config.num_hidden_layers ):
        __UpperCamelCase : BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        __UpperCamelCase : BertSelfAttention = layer.attention.self

        __UpperCamelCase : int = check_and_map_params(
            self_attn.key.bias.data , F"encoder.transformer_cells.{i}.attention_cell.proj_key.bias" )
        __UpperCamelCase : List[str] = check_and_map_params(
            self_attn.key.weight.data , F"encoder.transformer_cells.{i}.attention_cell.proj_key.weight" )
        __UpperCamelCase : str = check_and_map_params(
            self_attn.query.bias.data , F"encoder.transformer_cells.{i}.attention_cell.proj_query.bias" )
        __UpperCamelCase : List[Any] = check_and_map_params(
            self_attn.query.weight.data , F"encoder.transformer_cells.{i}.attention_cell.proj_query.weight" )
        __UpperCamelCase : List[str] = check_and_map_params(
            self_attn.value.bias.data , F"encoder.transformer_cells.{i}.attention_cell.proj_value.bias" )
        __UpperCamelCase : Tuple = check_and_map_params(
            self_attn.value.weight.data , F"encoder.transformer_cells.{i}.attention_cell.proj_value.weight" )

        # self attention output
        __UpperCamelCase : BertSelfOutput = layer.attention.output

        __UpperCamelCase : List[Any] = check_and_map_params(
            self_output.dense.bias , F"encoder.transformer_cells.{i}.proj.bias" )
        __UpperCamelCase : List[Any] = check_and_map_params(
            self_output.dense.weight , F"encoder.transformer_cells.{i}.proj.weight" )
        __UpperCamelCase : List[Any] = check_and_map_params(
            self_output.LayerNorm.bias , F"encoder.transformer_cells.{i}.layer_norm.beta" )
        __UpperCamelCase : Optional[int] = check_and_map_params(
            self_output.LayerNorm.weight , F"encoder.transformer_cells.{i}.layer_norm.gamma" )

        # intermediate
        __UpperCamelCase : BertIntermediate = layer.intermediate

        __UpperCamelCase : Dict = check_and_map_params(
            intermediate.dense.bias , F"encoder.transformer_cells.{i}.ffn.ffn_1.bias" )
        __UpperCamelCase : List[Any] = check_and_map_params(
            intermediate.dense.weight , F"encoder.transformer_cells.{i}.ffn.ffn_1.weight" )

        # output
        __UpperCamelCase : BertOutput = layer.output

        __UpperCamelCase : Dict = check_and_map_params(
            bert_output.dense.bias , F"encoder.transformer_cells.{i}.ffn.ffn_2.bias" )
        __UpperCamelCase : Union[str, Any] = check_and_map_params(
            bert_output.dense.weight , F"encoder.transformer_cells.{i}.ffn.ffn_2.weight" )
        __UpperCamelCase : List[str] = check_and_map_params(
            bert_output.LayerNorm.bias , F"encoder.transformer_cells.{i}.ffn.layer_norm.beta" )
        __UpperCamelCase : int = check_and_map_params(
            bert_output.LayerNorm.weight , F"encoder.transformer_cells.{i}.ffn.layer_norm.gamma" )

    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    __UpperCamelCase : Any = RobertaTokenizer.from_pretrained("roberta-base" )

    __UpperCamelCase : int = tokenizer.encode_plus(snake_case__ )["input_ids"]

    # Get gluon output
    __UpperCamelCase : Dict = mx.nd.array([input_ids] )
    __UpperCamelCase : Any = original_bort(inputs=snake_case__ , token_types=[] )

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(snake_case__ )
    __UpperCamelCase : Optional[Any] = BertModel.from_pretrained(snake_case__ )
    hf_bort_model.eval()

    __UpperCamelCase : str = tokenizer.encode_plus(snake_case__ , return_tensors="pt" )
    __UpperCamelCase : Dict = hf_bort_model(**snake_case__ )[0]

    __UpperCamelCase : List[Any] = output_gluon[0].asnumpy()
    __UpperCamelCase : Optional[int] = output_hf[0].detach().numpy()

    __UpperCamelCase : Dict = np.max(np.abs(hf_layer - gluon_layer ) ).item()
    __UpperCamelCase : List[Any] = np.allclose(snake_case__ , snake_case__ , atol=1E-3 )

    if success:
        print("✔️ Both model do output the same tensors" )
    else:
        print("❌ Both model do **NOT** output the same tensors" )
        print("Absolute difference is:" , snake_case__ )


if __name__ == "__main__":
    _lowerCAmelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    _lowerCAmelCase = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
298
1
"""Breadth-first search on an unweighted graph: shortest path and shortest
path length.

Reconstruction note: the obfuscated original assigned every local to a
mangled name while reading the real names (``queue``, ``path``, ``node``,
``explored``, ``visited``, ``dist``, ...); the ``__main__`` calls evidence the
public names ``demo_graph``, ``bfs_shortest_path`` and
``bfs_shortest_path_distance``.
"""

demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph, start, goal):
    """Return one shortest path from ``start`` to ``goal`` as a list of
    nodes, or ``[]`` when no path exists.

    >>> bfs_shortest_path(demo_graph, "G", "D")
    ['G', 'C', 'A', 'B', 'D']
    """
    explored = set()  # keep track of all the nodes already expanded
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph, start, target):
    """Return the number of edges on a shortest ``start``->``target`` path;
    ``0`` when they coincide, ``-1`` when unreachable or the arguments are
    invalid.

    >>> bfs_shortest_path_distance(demo_graph, "G", "D")
    4
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(queue)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
298
"""Tests for `datasets` Beam-based dataset builders (run with the DirectRunner)."""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import datasets
import datasets.config

from .utils import require_beam


class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy Beam builder producing flat ``{"content": str}`` examples."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        # apache_beam is imported lazily so the module stays importable without it.
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy Beam builder producing nested ``{"a": {"b": [str]}}`` examples."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    """Three flat examples keyed by index."""
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    """Three nested examples keyed by index."""
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            # Force two output shards by wrapping the real writer.
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            # Both shards must exist (the second check previously duplicated
            # shard 00000 instead of checking 00001).
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        # Without a beam_runner, download_and_prepare must refuse to run.
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
298
1
"""Tests for the Flax BigBird models."""
import unittest

from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax

    from transformers.models.big_bird.modeling_flax_big_bird import (
        FlaxBigBirdForCausalLM,
        FlaxBigBirdForMaskedLM,
        FlaxBigBirdForMultipleChoice,
        FlaxBigBirdForPreTraining,
        FlaxBigBirdForQuestionAnswering,
        FlaxBigBirdForSequenceClassification,
        FlaxBigBirdForTokenClassification,
        FlaxBigBirdModel,
    )


class FlaxBigBirdModelTester(unittest.TestCase):
    """Builds small BigBird configs and dummy inputs for the common Flax test suite."""

    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask) for a tiny model."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    # Block-sparse attention returns attention_probs=None, so attention tests are skipped.
    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        # Only meaningful when attention probabilities are returned.
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
298
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

from accelerate.test_utils import execute_subprocess_async


def test_command_parser(subparsers=None):
    """Build the argument parser for `accelerate test`.

    When ``subparsers`` is given (the normal CLI path), registers a ``test``
    sub-command whose default ``func`` dispatches to :func:`test_command`;
    otherwise returns a standalone parser.
    """
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    """Run the bundled accelerate test script via `accelerate-launch`.

    The script lives two directories up from this file, under
    ``test_utils/scripts/test_script.py``; ``--config_file`` is forwarded when set.
    """
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    """Standalone entry point: parse CLI args and run the test command."""
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
298
1
"""Ideal gas law (PV = nRT) helpers."""

UNIVERSAL_GAS_CONSTANT = 8.314_462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Return the pressure P = nRT / V of an ideal gas system.

    Args:
        moles: amount of substance n (mol).
        kelvin: absolute temperature T (K).
        volume: volume V (m^3); a volume of 0 raises ZeroDivisionError.

    Raises:
        ValueError: if any input is negative.
    """
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Return the volume V = nRT / P of an ideal gas system.

    Args:
        moles: amount of substance n (mol).
        kelvin: absolute temperature T (K).
        pressure: pressure P (Pa); a pressure of 0 raises ZeroDivisionError.

    Raises:
        ValueError: if any input is negative.
    """
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
298
"""Tests for the BlenderbotSmall tokenizer."""
import json
import os
import unittest

from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
    VOCAB_FILES_NAMES,
    BlenderbotSmallTokenizer,
)

from ...test_tokenization_common import TokenizerTesterMixin


class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        """Write a tiny vocab/merges pair to a temp dir for the common test suite."""
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
298
1
'''simple docstring''' def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): __UpperCamelCase : Dict = [redshift, radiation_density, matter_density, dark_energy] if any(p < 0 for p in parameters ): raise ValueError("All input parameters must be positive" ) if any(p > 1 for p in parameters[1:4] ): raise ValueError("Relative densities cannot be greater than one" ) else: __UpperCamelCase : str = 1 - (matter_density + radiation_density + dark_energy) __UpperCamelCase : List[Any] = ( radiation_density * (redshift + 1) ** 4 + matter_density * (redshift + 1) ** 3 + curvature * (redshift + 1) ** 2 + dark_energy ) __UpperCamelCase : Optional[Any] = hubble_constant * e_a ** (1 / 2) return hubble if __name__ == "__main__": import doctest # run doctest doctest.testmod() # demo LCDM approximation _lowerCAmelCase = 0.3 print( hubble_parameter( hubble_constant=68.3, radiation_density=1E-4, matter_density=matter_density, dark_energy=1 - matter_density, redshift=0, ) )
298
'''simple docstring''' from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig _lowerCAmelCase = logging.get_logger(__name__) # General docstring _lowerCAmelCase = '''RegNetConfig''' # Base docstring _lowerCAmelCase = '''facebook/regnet-y-040''' _lowerCAmelCase = [1, 1088, 7, 7] # Image classification docstring _lowerCAmelCase = '''facebook/regnet-y-040''' _lowerCAmelCase = '''tabby, tabby cat''' _lowerCAmelCase = [ '''facebook/regnet-y-040''', # See all regnet models at https://huggingface.co/models?filter=regnet ] class A ( tf.keras.layers.Layer ): '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase = 3 , _UpperCAmelCase = 1 , _UpperCAmelCase = 1 , _UpperCAmelCase = "relu" , **_UpperCAmelCase , ) -> Optional[int]: super().__init__(**_UpperCAmelCase ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb __UpperCamelCase : List[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) __UpperCamelCase : Tuple = tf.keras.layers.ConvaD( filters=_UpperCAmelCase , kernel_size=_UpperCAmelCase , strides=_UpperCAmelCase , padding="VALID" , groups=_UpperCAmelCase , use_bias=_UpperCAmelCase , name="convolution" , ) __UpperCamelCase : int = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" ) __UpperCamelCase : List[str] = ACTaFN[activation] if activation is not None else tf.identity def a_ (self 
, _UpperCAmelCase ) -> Dict: __UpperCamelCase : str = self.convolution(self.padding(_UpperCAmelCase ) ) __UpperCamelCase : Dict = self.normalization(_UpperCAmelCase ) __UpperCamelCase : Dict = self.activation(_UpperCAmelCase ) return hidden_state class A ( tf.keras.layers.Layer ): '''simple docstring''' def __init__(self , _UpperCAmelCase , **_UpperCAmelCase ) -> Optional[Any]: super().__init__(**_UpperCAmelCase ) __UpperCamelCase : Any = config.num_channels __UpperCamelCase : str = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , ) def a_ (self , _UpperCAmelCase ) -> Tuple: __UpperCamelCase : Dict = shape_list(_UpperCAmelCase )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) __UpperCamelCase : Any = tf.transpose(_UpperCAmelCase , perm=(0, 2, 3, 1) ) __UpperCamelCase : List[Any] = self.embedder(_UpperCAmelCase ) return hidden_state class A ( tf.keras.layers.Layer ): '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase = 2 , **_UpperCAmelCase ) -> Any: super().__init__(**_UpperCAmelCase ) __UpperCamelCase : Any = tf.keras.layers.ConvaD( filters=_UpperCAmelCase , kernel_size=1 , strides=_UpperCAmelCase , use_bias=_UpperCAmelCase , name="convolution" ) __UpperCamelCase : Tuple = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase = False ) -> tf.Tensor: return self.normalization(self.convolution(_UpperCAmelCase ) , training=_UpperCAmelCase ) class A ( tf.keras.layers.Layer ): '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) -> Any: super().__init__(**_UpperCAmelCase ) __UpperCamelCase : List[str] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_UpperCAmelCase , name="pooler" ) __UpperCamelCase : Optional[Any] = [ tf.keras.layers.ConvaD(filters=_UpperCAmelCase , kernel_size=1 , activation="relu" , name="attention.0" ), tf.keras.layers.ConvaD(filters=_UpperCAmelCase , kernel_size=1 , activation="sigmoid" , name="attention.2" ), ] def a_ (self , _UpperCAmelCase ) -> Tuple: # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] __UpperCamelCase : List[str] = self.pooler(_UpperCAmelCase ) for layer_module in self.attention: __UpperCamelCase : str = layer_module(_UpperCAmelCase ) __UpperCamelCase : List[Any] = hidden_state * pooled return hidden_state class A ( tf.keras.layers.Layer ): '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1 , **_UpperCAmelCase ) -> int: super().__init__(**_UpperCAmelCase ) __UpperCamelCase : List[Any] = 
in_channels != out_channels or stride != 1 __UpperCamelCase : List[str] = max(1 , out_channels // config.groups_width ) __UpperCamelCase : List[Any] = ( TFRegNetShortCut(_UpperCAmelCase , stride=_UpperCAmelCase , name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" , name="shortcut" ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. __UpperCamelCase : Optional[Any] = [ TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ), TFRegNetConvLayer( _UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , activation=config.hidden_act , name="layer.1" ), TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase , name="layer.2" ), ] __UpperCamelCase : Dict = ACTaFN[config.hidden_act] def a_ (self , _UpperCAmelCase ) -> Union[str, Any]: __UpperCamelCase : List[Any] = hidden_state for layer_module in self.layers: __UpperCamelCase : Dict = layer_module(_UpperCAmelCase ) __UpperCamelCase : List[Any] = self.shortcut(_UpperCAmelCase ) hidden_state += residual __UpperCamelCase : Tuple = self.activation(_UpperCAmelCase ) return hidden_state class A ( tf.keras.layers.Layer ): '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1 , **_UpperCAmelCase ) -> Any: super().__init__(**_UpperCAmelCase ) __UpperCamelCase : str = in_channels != out_channels or stride != 1 __UpperCamelCase : Optional[int] = max(1 , out_channels // config.groups_width ) __UpperCamelCase : Union[str, Any] = ( TFRegNetShortCut(_UpperCAmelCase , stride=_UpperCAmelCase , name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" , name="shortcut" ) ) __UpperCamelCase : Union[str, Any] = [ TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ), TFRegNetConvLayer( _UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , 
activation=config.hidden_act , name="layer.1" ), TFRegNetSELayer(_UpperCAmelCase , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ), TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase , name="layer.3" ), ] __UpperCamelCase : Union[str, Any] = ACTaFN[config.hidden_act] def a_ (self , _UpperCAmelCase ) -> int: __UpperCamelCase : str = hidden_state for layer_module in self.layers: __UpperCamelCase : Any = layer_module(_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = self.shortcut(_UpperCAmelCase ) hidden_state += residual __UpperCamelCase : Union[str, Any] = self.activation(_UpperCAmelCase ) return hidden_state class A ( tf.keras.layers.Layer ): '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , **_UpperCAmelCase ) -> int: super().__init__(**_UpperCAmelCase ) __UpperCamelCase : List[str] = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer __UpperCamelCase : Tuple = [ # downsampling is done in the first layer with stride of 2 layer(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , name="layers.0" ), *[layer(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , name=f"layers.{i+1}" ) for i in range(depth - 1 )], ] def a_ (self , _UpperCAmelCase ) -> Any: for layer_module in self.layers: __UpperCamelCase : Dict = layer_module(_UpperCAmelCase ) return hidden_state class A ( tf.keras.layers.Layer ): '''simple docstring''' def __init__(self , _UpperCAmelCase , **_UpperCAmelCase ) -> str: super().__init__(**_UpperCAmelCase ) __UpperCamelCase : Dict = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( _UpperCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) ) __UpperCamelCase : Union[str, Any] = 
zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(_UpperCAmelCase , config.depths[1:] ) ): self.stages.append(TFRegNetStage(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , depth=_UpperCAmelCase , name=f"stages.{i+1}" ) ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase = False , _UpperCAmelCase = True ) -> TFBaseModelOutputWithNoAttention: __UpperCamelCase : List[Any] = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: __UpperCamelCase : Any = hidden_states + (hidden_state,) __UpperCamelCase : Any = stage_module(_UpperCAmelCase ) if output_hidden_states: __UpperCamelCase : List[Any] = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=_UpperCAmelCase , hidden_states=_UpperCAmelCase ) @keras_serializable class A ( tf.keras.layers.Layer ): '''simple docstring''' A = RegNetConfig def __init__(self , _UpperCAmelCase , **_UpperCAmelCase ) -> List[Any]: super().__init__(**_UpperCAmelCase ) __UpperCamelCase : Optional[int] = config __UpperCamelCase : List[Any] = TFRegNetEmbeddings(_UpperCAmelCase , name="embedder" ) __UpperCamelCase : Union[str, Any] = TFRegNetEncoder(_UpperCAmelCase , name="encoder" ) __UpperCamelCase : Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_UpperCAmelCase , name="pooler" ) @unpack_inputs def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention: __UpperCamelCase : Optional[int] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __UpperCamelCase : Dict = return_dict if return_dict is not None else self.config.use_return_dict __UpperCamelCase : Union[str, Any] = self.embedder(_UpperCAmelCase , training=_UpperCAmelCase ) __UpperCamelCase : str 
= self.encoder( _UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase , training=_UpperCAmelCase ) __UpperCamelCase : List[str] = encoder_outputs[0] __UpperCamelCase : Tuple = self.pooler(_UpperCAmelCase ) # Change to NCHW output format have uniformity in the modules __UpperCamelCase : List[str] = tf.transpose(_UpperCAmelCase , perm=(0, 3, 1, 2) ) __UpperCamelCase : List[Any] = tf.transpose(_UpperCAmelCase , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: __UpperCamelCase : List[str] = tuple([tf.transpose(_UpperCAmelCase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=_UpperCAmelCase , pooler_output=_UpperCAmelCase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' A = RegNetConfig A = "regnet" A = "pixel_values" @property def a_ (self ) -> List[Any]: return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )} _lowerCAmelCase = R''' Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. ''' _lowerCAmelCase = R''' Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. 
See [`ConveNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. ''' @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." , SCREAMING_SNAKE_CASE__ , ) class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__(self , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) -> Tuple: super().__init__(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = TFRegNetMainLayer(_UpperCAmelCase , name="regnet" ) @unpack_inputs @add_start_docstrings_to_model_forward(_UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]: __UpperCamelCase : List[str] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __UpperCamelCase : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict __UpperCamelCase : Tuple = self.regnet( pixel_values=_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase , training=_UpperCAmelCase , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , SCREAMING_SNAKE_CASE__ , ) class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__(self , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) -> int: super().__init__(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = config.num_labels __UpperCamelCase : Any = TFRegNetMainLayer(_UpperCAmelCase , name="regnet" ) # classification head __UpperCamelCase : List[str] = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(_UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def a_ (self , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: __UpperCamelCase : Dict = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __UpperCamelCase : str = return_dict if return_dict is not None else self.config.use_return_dict __UpperCamelCase : Dict = self.regnet( _UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase , training=_UpperCAmelCase ) __UpperCamelCase : Union[str, Any] = outputs.pooler_output if return_dict else outputs[1] __UpperCamelCase : List[str] = self.classifier[0](_UpperCAmelCase ) __UpperCamelCase : Optional[int] = self.classifier[1](_UpperCAmelCase ) __UpperCamelCase : str = None if labels is None else self.hf_compute_loss(labels=_UpperCAmelCase , logits=_UpperCAmelCase ) if not return_dict: __UpperCamelCase : Union[str, Any] = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=_UpperCAmelCase , 
logits=_UpperCAmelCase , hidden_states=outputs.hidden_states )
298
1
"""Generate the reflected binary (Gray) code sequence for a given bit width."""


def gray_code(bit_count):
    """Return the ``bit_count``-bit Gray code sequence as a list of integers.

    Consecutive entries differ in exactly one bit.

    >>> gray_code(1)
    [0, 1]
    >>> gray_code(2)
    [0, 1, 3, 2]

    Raises:
        ValueError: if ``bit_count`` is negative.
    """
    # bit_count represents the number of bits in each Gray code entry.
    if bit_count < 0:
        raise ValueError("The given input must be positive")
    # Build the bit-string form first, then convert each entry to an int.
    sequence = gray_code_sequence_string(bit_count)
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)
    return sequence


def gray_code_sequence_string(bit_count):
    """Recursively build the ``bit_count``-bit Gray code as bit strings.

    The n-bit code is the (n-1)-bit code prefixed with "0", followed by the
    (n-1)-bit code in reverse order prefixed with "1".

    >>> gray_code_sequence_string(2)
    ['00', '01', '11', '10']
    """
    # Base cases: n = 0 or n = 1.
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # 1 << n is equivalent to 2^n entries
    # Recursive answer generates the sequence for n-1 bits.
    smaller_sequence = gray_code_sequence_string(bit_count - 1)
    sequence = []
    # Append "0" to the first half of the smaller sequence, in order.
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])
    # Append "1" to the second half, walking the smaller sequence backwards.
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])
    return sequence


# Backward-compatible alias: the mangled module exposed this name as its
# entry point (first definition's semantics restored — the second definition
# had shadowed it under the same name).
__lowerCAmelCase = gray_code

if __name__ == "__main__":
    import doctest

    doctest.testmod()
298
'''simple docstring''' import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEmbeddings, BertLayer, BertPooler, BertPreTrainedModel, ) def __lowerCAmelCase ( snake_case__ ): __UpperCamelCase : Tuple = torch.exp(snake_case__ ) __UpperCamelCase : str = torch.sum(snake_case__ , dim=1 ) # sum of exp(x_i) __UpperCamelCase : int = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i) return torch.log(snake_case__ ) - B / A class A ( nn.Module ): '''simple docstring''' def __init__(self , _UpperCAmelCase ) -> Union[str, Any]: super().__init__() __UpperCamelCase : Any = config.output_attentions __UpperCamelCase : Dict = config.output_hidden_states __UpperCamelCase : Union[str, Any] = nn.ModuleList([BertLayer(_UpperCAmelCase ) for _ in range(config.num_hidden_layers )] ) __UpperCamelCase : Tuple = nn.ModuleList([BertHighway(_UpperCAmelCase ) for _ in range(config.num_hidden_layers )] ) __UpperCamelCase : Optional[int] = [-1 for _ in range(config.num_hidden_layers )] def a_ (self , _UpperCAmelCase ) -> int: if (type(_UpperCAmelCase ) is float) or (type(_UpperCAmelCase ) is int): for i in range(len(self.early_exit_entropy ) ): __UpperCamelCase : str = x else: __UpperCamelCase : List[Any] = x def a_ (self , _UpperCAmelCase ) -> str: __UpperCamelCase : Tuple = pooler.state_dict() for highway in self.highway: for name, param in highway.pooler.state_dict().items(): param.copy_(loaded_model[name] ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ) -> List[Any]: __UpperCamelCase : Optional[Any] = () __UpperCamelCase : Tuple = () __UpperCamelCase : Dict = () for i, layer_module in enumerate(self.layer ): if self.output_hidden_states: __UpperCamelCase : Tuple = all_hidden_states + 
(hidden_states,) __UpperCamelCase : Optional[int] = layer_module( _UpperCAmelCase , _UpperCAmelCase , head_mask[i] , _UpperCAmelCase , _UpperCAmelCase ) __UpperCamelCase : Tuple = layer_outputs[0] if self.output_attentions: __UpperCamelCase : Optional[Any] = all_attentions + (layer_outputs[1],) __UpperCamelCase : Any = (hidden_states,) if self.output_hidden_states: __UpperCamelCase : Any = current_outputs + (all_hidden_states,) if self.output_attentions: __UpperCamelCase : int = current_outputs + (all_attentions,) __UpperCamelCase : Optional[int] = self.highway[i](_UpperCAmelCase ) # logits, pooled_output if not self.training: __UpperCamelCase : Dict = highway_exit[0] __UpperCamelCase : Any = entropy(_UpperCAmelCase ) __UpperCamelCase : str = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy __UpperCamelCase : Optional[Any] = all_highway_exits + (highway_exit,) if highway_entropy < self.early_exit_entropy[i]: __UpperCamelCase : str = (highway_logits,) + current_outputs[1:] + (all_highway_exits,) raise HighwayException(_UpperCAmelCase , i + 1 ) else: __UpperCamelCase : Optional[int] = all_highway_exits + (highway_exit,) # Add last layer if self.output_hidden_states: __UpperCamelCase : int = all_hidden_states + (hidden_states,) __UpperCamelCase : Dict = (hidden_states,) if self.output_hidden_states: __UpperCamelCase : Union[str, Any] = outputs + (all_hidden_states,) if self.output_attentions: __UpperCamelCase : Optional[int] = outputs + (all_attentions,) __UpperCamelCase : List[Any] = outputs + (all_highway_exits,) return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits @add_start_docstrings( "The Bert Model transformer with early exiting (DeeBERT). 
" , SCREAMING_SNAKE_CASE__ , ) class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__(self , _UpperCAmelCase ) -> Dict: super().__init__(_UpperCAmelCase ) __UpperCamelCase : Union[str, Any] = config __UpperCamelCase : Dict = BertEmbeddings(_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = DeeBertEncoder(_UpperCAmelCase ) __UpperCamelCase : str = BertPooler(_UpperCAmelCase ) self.init_weights() def a_ (self ) -> Any: self.encoder.init_highway_pooler(self.pooler ) def a_ (self ) -> Optional[int]: return self.embeddings.word_embeddings def a_ (self , _UpperCAmelCase ) -> Dict: __UpperCamelCase : int = value def a_ (self , _UpperCAmelCase ) -> Tuple: for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(_UpperCAmelCase ) @add_start_docstrings_to_model_forward(_UpperCAmelCase ) def a_ (self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ) -> Union[str, Any]: if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" ) elif input_ids is not None: __UpperCamelCase : Tuple = input_ids.size() elif inputs_embeds is not None: __UpperCamelCase : Optional[int] = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds" ) __UpperCamelCase : List[str] = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: __UpperCamelCase : int = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase ) if encoder_attention_mask is None: __UpperCamelCase : Tuple = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase ) if token_type_ids is None: __UpperCamelCase : Optional[Any] = torch.zeros(_UpperCAmelCase , dtype=torch.long , device=_UpperCAmelCase ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, 
to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. __UpperCamelCase : torch.Tensor = self.get_extended_attention_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_attention_mask.dim() == 3: __UpperCamelCase : Tuple = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.dim() == 2: __UpperCamelCase : Any = encoder_attention_mask[:, None, None, :] __UpperCamelCase : List[Any] = encoder_extended_attention_mask.to( dtype=next(self.parameters() ).dtype ) # fp16 compatibility __UpperCamelCase : Dict = (1.0 - encoder_extended_attention_mask) * -10_000.0 # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] __UpperCamelCase : Dict = self.get_head_mask(_UpperCAmelCase , self.config.num_hidden_layers ) __UpperCamelCase : Optional[int] = self.embeddings( input_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase ) __UpperCamelCase : List[Any] = self.encoder( _UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , ) __UpperCamelCase : Union[str, Any] = encoder_outputs[0] __UpperCamelCase : Any = self.pooler(_UpperCAmelCase ) __UpperCamelCase : Union[str, Any] = ( sequence_output, pooled_output, ) + encoder_outputs[ 1: ] # add hidden_states and attentions if they are here return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__(self , 
_UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]: __UpperCamelCase : Tuple = message __UpperCamelCase : Union[str, Any] = exit_layer # start from 1! class A ( nn.Module ): '''simple docstring''' def __init__(self , _UpperCAmelCase ) -> Dict: super().__init__() __UpperCamelCase : Union[str, Any] = BertPooler(_UpperCAmelCase ) __UpperCamelCase : int = nn.Dropout(config.hidden_dropout_prob ) __UpperCamelCase : Union[str, Any] = nn.Linear(config.hidden_size , config.num_labels ) def a_ (self , _UpperCAmelCase ) -> Any: # Pooler __UpperCamelCase : Optional[int] = encoder_outputs[0] __UpperCamelCase : str = self.pooler(_UpperCAmelCase ) # "return" pooler_output # BertModel __UpperCamelCase : Tuple = (pooler_input, pooler_output) + encoder_outputs[1:] # "return" bmodel_output # Dropout and classification __UpperCamelCase : Dict = bmodel_output[1] __UpperCamelCase : List[Any] = self.dropout(_UpperCAmelCase ) __UpperCamelCase : Any = self.classifier(_UpperCAmelCase ) return logits, pooled_output @add_start_docstrings( "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. 
" , SCREAMING_SNAKE_CASE__ , ) class A ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__(self , _UpperCAmelCase ) -> Any: super().__init__(_UpperCAmelCase ) __UpperCamelCase : List[Any] = config.num_labels __UpperCamelCase : List[Any] = config.num_hidden_layers __UpperCamelCase : Optional[int] = DeeBertModel(_UpperCAmelCase ) __UpperCamelCase : List[str] = nn.Dropout(config.hidden_dropout_prob ) __UpperCamelCase : str = nn.Linear(config.hidden_size , self.config.num_labels ) self.init_weights() @add_start_docstrings_to_model_forward(_UpperCAmelCase ) def a_ (self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=-1 , _UpperCAmelCase=False , ) -> int: __UpperCamelCase : int = self.num_layers try: __UpperCamelCase : Tuple = self.bert( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , head_mask=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase , ) # sequence_output, pooled_output, (hidden_states), (attentions), highway exits __UpperCamelCase : str = outputs[1] __UpperCamelCase : List[Any] = self.dropout(_UpperCAmelCase ) __UpperCamelCase : Dict = self.classifier(_UpperCAmelCase ) __UpperCamelCase : Tuple = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: __UpperCamelCase : int = e.message __UpperCamelCase : Optional[Any] = e.exit_layer __UpperCamelCase : Optional[int] = outputs[0] if not self.training: __UpperCamelCase : Optional[int] = entropy(_UpperCAmelCase ) __UpperCamelCase : Optional[Any] = [] __UpperCamelCase : Any = [] if labels is not None: if self.num_labels == 1: # We are doing regression __UpperCamelCase : List[str] = MSELoss() __UpperCamelCase : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: __UpperCamelCase : Dict = CrossEntropyLoss() __UpperCamelCase : Any = loss_fct(logits.view(-1 
, self.num_labels ) , labels.view(-1 ) ) # work with highway exits __UpperCamelCase : List[Any] = [] for highway_exit in outputs[-1]: __UpperCamelCase : Union[str, Any] = highway_exit[0] if not self.training: highway_logits_all.append(_UpperCAmelCase ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression __UpperCamelCase : Union[str, Any] = MSELoss() __UpperCamelCase : str = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: __UpperCamelCase : Optional[Any] = CrossEntropyLoss() __UpperCamelCase : List[str] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(_UpperCAmelCase ) if train_highway: __UpperCamelCase : int = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: __UpperCamelCase : Dict = (loss,) + outputs if not self.training: __UpperCamelCase : Optional[int] = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: __UpperCamelCase : int = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
298
1
"""Deprecated feature-extractor shim for YOLOS (use the image processor instead)."""

import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor

_lowerCAmelCase = logging.get_logger(__name__)


# NOTE(review): the base class is a mangled reference — presumably
# YolosImageProcessor given the import above; confirm against upstream.
class A(SCREAMING_SNAKE_CASE__):
    """Deprecated alias class kept for backward compatibility.

    Emits a ``FutureWarning`` at construction and otherwise defers entirely
    to the parent class; scheduled for removal in Transformers v5.
    """

    def __init__(self, *args, **kwargs) -> None:
        # Fix: the original declared two parameters with the same mangled
        # name (a SyntaxError) and passed that name as the warning category.
        # FutureWarning is the conventional category for a deprecation that
        # redirects users to a replacement class.
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
298
"""File-name and cache-path constants (diffusers-style utils/constants module)."""

import os

from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home

# NOTE(review): identifier mangling has collapsed every constant onto the
# single name `_lowerCAmelCase`; each assignment below clobbers the previous
# one, so only the final value (".self_attn") is observable after import.
# The trailing comments record what each value appears to denote — confirm
# against the upstream file, which presumably bound one distinct
# UPPER_SNAKE_CASE name per constant.
_lowerCAmelCase = HUGGINGFACE_HUB_CACHE  # Hugging Face Hub cache directory
_lowerCAmelCase = '''config.json'''  # model configuration filename
_lowerCAmelCase = '''diffusion_pytorch_model.bin'''  # PyTorch weights filename
_lowerCAmelCase = '''diffusion_flax_model.msgpack'''  # Flax weights filename
_lowerCAmelCase = '''model.onnx'''  # ONNX export filename
_lowerCAmelCase = '''diffusion_pytorch_model.safetensors'''  # safetensors weights filename
_lowerCAmelCase = '''weights.pb'''  # ONNX external-weights filename
_lowerCAmelCase = '''https://huggingface.co'''  # Hub resolve endpoint URL
# NOTE(review): `default_cache_path` is not defined anywhere in this view —
# verify it is bound earlier in the upstream file before relying on this line.
_lowerCAmelCase = default_cache_path
_lowerCAmelCase = '''diffusers_modules'''  # dynamic-modules package name
_lowerCAmelCase = os.getenv('''HF_MODULES_CACHE''', os.path.join(hf_cache_home, '''modules'''))  # dynamic-modules cache dir
_lowerCAmelCase = ['''fp16''', '''non-ema''']  # deprecated `revision` argument values
_lowerCAmelCase = '''.self_attn'''  # text-encoder attention-module name suffix
298
1