code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
"""simple docstring""" from __future__ import annotations from typing import Dict from ...configuration_utils import PretrainedConfig lowerCAmelCase: Optional[int] ={ "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json", "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json", } class lowerCamelCase__ ( __SCREAMING_SNAKE_CASE ): __UpperCAmelCase = """ernie_m""" __UpperCAmelCase = {"""dropout""": """classifier_dropout""", """num_classes""": """num_labels"""} def __init__( self , snake_case = 2_5_0_0_0_2 , snake_case = 7_6_8 , snake_case = 1_2 , snake_case = 1_2 , snake_case = 3_0_7_2 , snake_case = "gelu" , snake_case = 0.1 , snake_case = 0.1 , snake_case = 5_1_4 , snake_case = 0.02 , snake_case = 1 , snake_case = 1E-05 , snake_case=None , snake_case=False , snake_case=0.0 , **snake_case , ) -> List[str]: """simple docstring""" super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_ ) lowercase : List[Any] = vocab_size lowercase : Tuple = hidden_size lowercase : Optional[Any] = num_hidden_layers lowercase : Optional[Any] = num_attention_heads lowercase : List[str] = intermediate_size lowercase : Optional[int] = hidden_act lowercase : Optional[Any] = hidden_dropout_prob lowercase : Union[str, Any] = attention_probs_dropout_prob lowercase : Optional[Any] = max_position_embeddings lowercase : Union[str, Any] = initializer_range lowercase : Dict = layer_norm_eps lowercase : List[str] = classifier_dropout lowercase : Union[str, Any] = is_decoder lowercase : str = act_dropout
607
import unittest from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __A = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' lowercase_ = XLMProphetNetTokenizer lowercase_ = False lowercase_ = True def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Optional[Any]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase__: Any =XLMProphetNetTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_) tokenizer.save_pretrained(self.tmpdirname) def SCREAMING_SNAKE_CASE_ (self : str) ->str: '''simple docstring''' lowerCamelCase__: List[Any] ="[PAD]" lowerCamelCase__: Tuple =0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_) , UpperCAmelCase_) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_) , UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Dict) ->int: '''simple docstring''' lowerCamelCase__: List[Any] =list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , "[PAD]") self.assertEqual(vocab_keys[1] , "[CLS]") self.assertEqual(vocab_keys[-1] , "j") self.assertEqual(len(UpperCAmelCase_) , 1_012) def SCREAMING_SNAKE_CASE_ (self : Dict) ->Union[str, Any]: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_012) def SCREAMING_SNAKE_CASE_ (self : Any) ->Optional[int]: '''simple docstring''' lowerCamelCase__: Optional[Any] =XLMProphetNetTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_) lowerCamelCase__: Tuple =tokenizer.tokenize("This is a test") self.assertListEqual(UpperCAmelCase_ , ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) lowerCamelCase__: Optional[Any] =tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( UpperCAmelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) lowerCamelCase__: Any =tokenizer.convert_tokens_to_ids(UpperCAmelCase_) self.assertListEqual( UpperCAmelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4] ] , ) lowerCamelCase__: Any =tokenizer.convert_ids_to_tokens(UpperCAmelCase_) self.assertListEqual( UpperCAmelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "[UNK]", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "[UNK]", ".", ] , ) @cached_property def SCREAMING_SNAKE_CASE_ (self : Any) ->int: '''simple docstring''' return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased") @slow def SCREAMING_SNAKE_CASE_ (self : List[str]) ->List[str]: '''simple docstring''' lowerCamelCase__: Optional[int] ="Hello World!" lowerCamelCase__: Dict =[35_389, 6_672, 49, 2] self.assertListEqual(UpperCAmelCase_ , self.big_tokenizer.encode(UpperCAmelCase_)) @slow def SCREAMING_SNAKE_CASE_ (self : int) ->Union[str, Any]: '''simple docstring''' lowerCamelCase__: Any ={"input_ids": [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase_ , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
59
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) A_ = { "configuration_blenderbot_small": [ "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP", "BlenderbotSmallConfig", "BlenderbotSmallOnnxConfig", ], "tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = ["BlenderbotSmallTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST", "BlenderbotSmallForCausalLM", "BlenderbotSmallForConditionalGeneration", "BlenderbotSmallModel", "BlenderbotSmallPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ "TFBlenderbotSmallForConditionalGeneration", "TFBlenderbotSmallModel", "TFBlenderbotSmallPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ "FlaxBlenderbotSmallForConditionalGeneration", "FlaxBlenderbotSmallModel", "FlaxBlenderbotSmallPreTrainedModel", ] if TYPE_CHECKING: from .configuration_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotSmallConfig, BlenderbotSmallOnnxConfig, ) from .tokenization_blenderbot_small import BlenderbotSmallTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotSmallForCausalLM, BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel, BlenderbotSmallPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot_small import ( TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel, TFBlenderbotSmallPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, FlaxBlenderbotSmallPreTrainedModel, ) else: import sys A_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
393
import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE_ (self : List[str]) ->str: '''simple docstring''' lowerCamelCase__: Union[str, Any] ="ylacombe/bark-small" lowerCamelCase__: Tuple =tempfile.mkdtemp() lowerCamelCase__: Tuple ="en_speaker_1" lowerCamelCase__: Optional[int] ="This is a test string" lowerCamelCase__: List[str] ="speaker_embeddings_path.json" lowerCamelCase__: int ="speaker_embeddings" def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , **UpperCAmelCase_ : Any) ->Tuple: '''simple docstring''' return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Union[str, Any]: '''simple docstring''' shutil.rmtree(self.tmpdirname) def SCREAMING_SNAKE_CASE_ (self : int) ->Any: '''simple docstring''' lowerCamelCase__: List[Any] =self.get_tokenizer() lowerCamelCase__: List[str] =BarkProcessor(tokenizer=UpperCAmelCase_) processor.save_pretrained(self.tmpdirname) lowerCamelCase__: Dict =BarkProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab()) @slow def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Tuple: '''simple docstring''' lowerCamelCase__: Tuple =BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) lowerCamelCase__: Dict =self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)") lowerCamelCase__: Any =BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->int: '''simple docstring''' lowerCamelCase__: Any =BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) lowerCamelCase__: List[str] =35 lowerCamelCase__: Optional[Any] =2 lowerCamelCase__: Optional[Any] =8 lowerCamelCase__: Optional[int] ={ "semantic_prompt": np.ones(UpperCAmelCase_), "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)), "fine_prompt": np.ones((nb_codebooks_total, seq_len)), } # test providing already loaded voice_preset lowerCamelCase__: Any =processor(text=self.input_string , voice_preset=UpperCAmelCase_) lowerCamelCase__: int =inputs["history_prompt"] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([])).tolist()) # test loading voice preset from npz file lowerCamelCase__: Union[str, Any] =os.path.join(self.tmpdirname , "file.npz") np.savez(UpperCAmelCase_ , **UpperCAmelCase_) lowerCamelCase__: Tuple =processor(text=self.input_string , voice_preset=UpperCAmelCase_) lowerCamelCase__: Optional[Any] =inputs["history_prompt"] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([])).tolist()) # test loading voice preset from the hub lowerCamelCase__: Any =processor(text=self.input_string , voice_preset=self.voice_preset) def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Union[str, Any]: '''simple docstring''' lowerCamelCase__: str =self.get_tokenizer() lowerCamelCase__: Dict =BarkProcessor(tokenizer=UpperCAmelCase_) lowerCamelCase__: List[Any] =processor(text=self.input_string) lowerCamelCase__: Optional[int] =tokenizer( self.input_string , padding="max_length" , max_length=256 , add_special_tokens=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist())
59
0
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): """simple docstring""" UpperCamelCase_ = ShapEPipeline UpperCamelCase_ = ['''prompt'''] UpperCamelCase_ = ['''prompt'''] UpperCamelCase_ = [ '''num_images_per_prompt''', '''num_inference_steps''', '''generator''', '''latents''', '''guidance_scale''', '''frame_size''', '''output_type''', '''return_dict''', ] UpperCamelCase_ = False @property def A__ ( self : List[str] ) -> List[str]: '''simple docstring''' return 32 @property def A__ ( self : List[str] ) -> Optional[int]: '''simple docstring''' return 32 @property def A__ ( self : List[Any] ) -> Dict: '''simple docstring''' return self.time_input_dim * 4 @property def A__ ( self : Optional[int] ) -> int: '''simple docstring''' return 8 @property def A__ ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' lowercase : Tuple =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) return tokenizer @property def A__ ( self : int ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) lowercase : Optional[Any] =CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(UpperCAmelCase_ ) @property def A__ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' torch.manual_seed(0 ) lowercase : List[str] ={ "num_attention_heads": 2, "attention_head_dim": 16, "embedding_dim": self.time_input_dim, "num_embeddings": 32, "embedding_proj_dim": self.text_embedder_hidden_size, "time_embed_dim": self.time_embed_dim, "num_layers": 1, "clip_embed_dim": self.time_input_dim * 2, "additional_embeddings": 0, "time_embed_act_fn": "gelu", "norm_in_type": "layer", "encoder_hid_proj_type": None, "added_emb_type": None, } lowercase : Optional[Any] =PriorTransformer(**UpperCAmelCase_ ) return model @property def A__ ( self : Any ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) lowercase : List[Any] ={ "param_shapes": ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), "d_latent": self.time_input_dim, "d_hidden": self.renderer_dim, "n_output": 12, "background": ( 0.1, 0.1, 0.1, ), } lowercase : Tuple =ShapERenderer(**UpperCAmelCase_ ) return model def A__ ( self : List[Any] ) -> Optional[int]: '''simple docstring''' lowercase : str =self.dummy_prior lowercase : Optional[Any] =self.dummy_text_encoder lowercase : Optional[int] =self.dummy_tokenizer lowercase : List[Any] =self.dummy_renderer lowercase : Any =HeunDiscreteScheduler( beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=UpperCAmelCase_ , clip_sample=UpperCAmelCase_ , clip_sample_range=1.0 , ) lowercase : Dict ={ "prior": prior, "text_encoder": text_encoder, "tokenizer": tokenizer, "renderer": renderer, "scheduler": scheduler, } return components def A__ ( self : Any , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any]=0 ) -> str: '''simple docstring''' if str(UpperCAmelCase_ ).startswith('''mps''' ): lowercase : str =torch.manual_seed(UpperCAmelCase_ ) else: lowercase : Optional[int] =torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ ) lowercase : Tuple ={ "prompt": "horse", "generator": generator, "num_inference_steps": 1, "frame_size": 32, "output_type": "np", } return inputs def A__ ( self : List[Any] ) -> Any: '''simple docstring''' lowercase : str ="cpu" lowercase : Optional[int] =self.get_dummy_components() lowercase : Dict =self.pipeline_class(**UpperCAmelCase_ ) lowercase : Any =pipe.to(UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowercase : Any =pipe(**self.get_dummy_inputs(UpperCAmelCase_ ) ) lowercase : List[str] =output.images[0] lowercase : Any =image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) lowercase : int =np.array( [ 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def A__ ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def A__ ( self : Any ) -> List[str]: '''simple docstring''' lowercase : List[Any] =torch_device == "cpu" lowercase : int =True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=UpperCAmelCase_ , relax_max_difference=UpperCAmelCase_ , ) def A__ ( self : Tuple ) -> Any: '''simple docstring''' lowercase : Tuple =self.get_dummy_components() lowercase : List[Any] =self.pipeline_class(**UpperCAmelCase_ ) lowercase : List[str] =pipe.to(UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowercase : Dict =1 lowercase : List[str] =2 lowercase : List[str] =self.get_dummy_inputs(UpperCAmelCase_ ) for key in inputs.keys(): if key in self.batch_params: lowercase : List[str] =batch_size * [inputs[key]] lowercase : Union[str, Any] =pipe(**UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" def A__ ( self : List[Any] ) -> Any: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def A__ ( self : Any ) -> List[Any]: '''simple docstring''' lowercase : Union[str, Any] =load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_np_out.npy''' ) lowercase : Any =ShapEPipeline.from_pretrained('''openai/shap-e''' ) lowercase : Union[str, Any] =pipe.to(UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowercase : Dict =torch.Generator(device=UpperCAmelCase_ ).manual_seed(0 ) lowercase : Dict =pipe( '''a shark''' , generator=UpperCAmelCase_ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(UpperCAmelCase_ , UpperCAmelCase_ )
94
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = ["image_processor", "tokenizer"] lowercase_ = "CLIPImageProcessor" lowercase_ = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast") def __init__(self : List[Any] , UpperCAmelCase_ : int=None , UpperCAmelCase_ : List[Any]=None , **UpperCAmelCase_ : List[str]) ->Optional[int]: '''simple docstring''' lowerCamelCase__: Union[str, Any] =None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , UpperCAmelCase_ , ) lowerCamelCase__: int =kwargs.pop("feature_extractor") lowerCamelCase__: int =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") super().__init__(UpperCAmelCase_ , UpperCAmelCase_) def __call__(self : List[Any] , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : int=None , **UpperCAmelCase_ : Any) ->Union[str, Any]: '''simple docstring''' if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be none.") if text is not None: lowerCamelCase__: List[Any] =self.tokenizer(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_) if images is not None: lowerCamelCase__: int =self.image_processor(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_) if text is not None and images is not None: lowerCamelCase__: str =image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCAmelCase_) , tensor_type=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[str] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[Any]) ->Dict: '''simple docstring''' return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Optional[int] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Any) ->Optional[Any]: '''simple docstring''' return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_) @property def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Optional[int]: '''simple docstring''' lowerCamelCase__: Optional[Any] =self.tokenizer.model_input_names lowerCamelCase__: str =self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
59
0
"""simple docstring""" import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() A_ = logging.get_logger(__name__) def UpperCAmelCase__ (snake_case__ : Optional[Any] ): """simple docstring""" print("""Loading config file...""" ) def flatten_yaml_as_dict(snake_case__ : Optional[Any] , snake_case__ : Union[str, Any]="" , snake_case__ : Optional[int]="." ): _snake_case : Union[str, Any] = [] for k, v in d.items(): _snake_case : Dict = parent_key + sep + k if parent_key else k if isinstance(__a , collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(__a , __a , sep=__a ).items() ) else: items.append((new_key, v) ) return dict(__a ) _snake_case : Optional[Any] = argparse.Namespace() with open(__a , """r""" ) as yaml_file: try: _snake_case : List[Any] = yaml.load(__a , Loader=yaml.FullLoader ) _snake_case : Dict = flatten_yaml_as_dict(__a ) for k, v in flat_cfg.items(): setattr(__a , __a , __a ) except yaml.YAMLError as exc: logger.error("""Error while loading config file: {}. Error message: {}""".format(__a , str(__a ) ) ) return config def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Union[str, Any] ): """simple docstring""" _snake_case : List[str] = MobileViTVaConfig() _snake_case : Union[str, Any] = False # dataset if task_name.startswith("""imagenet1k_""" ): _snake_case : Dict = 10_00 if int(task_name.strip().split("""_""" )[-1] ) == 3_84: _snake_case : Tuple = 3_84 else: _snake_case : str = 2_56 _snake_case : Optional[int] = "imagenet-1k-id2label.json" elif task_name.startswith("""imagenet21k_to_1k_""" ): _snake_case : Optional[int] = 2_10_00 if int(task_name.strip().split("""_""" )[-1] ) == 3_84: _snake_case : Dict = 3_84 else: _snake_case : Any = 2_56 _snake_case : int = "imagenet-22k-id2label.json" elif task_name.startswith("""ade20k_""" ): _snake_case : Optional[int] = 1_51 _snake_case : Optional[int] = 5_12 _snake_case : Optional[int] = "ade20k-id2label.json" _snake_case : Tuple = True elif task_name.startswith("""voc_""" ): _snake_case : List[Any] = 21 _snake_case : List[str] = 5_12 _snake_case : Any = "pascal-voc-id2label.json" _snake_case : str = True # orig_config _snake_case : Optional[Any] = load_orig_config_file(__a ) assert getattr(__a , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model" _snake_case : Optional[Any] = getattr(__a , """model.classification.mitv2.width_multiplier""" , 1.0 ) assert ( getattr(__a , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" _snake_case : Dict = getattr(__a , """model.classification.activation.name""" , """swish""" ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: _snake_case : Optional[int] = getattr(__a , """model.segmentation.output_stride""" , 16 ) if "_deeplabv3" in task_name: _snake_case : Dict = getattr(__a , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] ) _snake_case : Optional[int] = getattr(__a , """model.segmentation.deeplabv3.aspp_out_channels""" , 5_12 ) _snake_case : Dict = getattr(__a , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 ) # id2label _snake_case : Dict = "huggingface/label-files" _snake_case : Tuple = json.load(open(hf_hub_download(__a , __a , repo_type="""dataset""" ) , """r""" ) ) _snake_case : Tuple = {int(__a ): v for k, v in idalabel.items()} _snake_case : int = idalabel _snake_case : Optional[int] = {v: k for k, v in idalabel.items()} return config def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : List[Any] ): """simple docstring""" _snake_case : Optional[Any] = dct.pop(__a ) _snake_case : Any = val def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : List[Any]=False ): """simple docstring""" if base_model: _snake_case : List[Any] = "" else: _snake_case : int = "mobilevitv2." _snake_case : Optional[Any] = [] for k in state_dict.keys(): if k[:8] == "encoder.": _snake_case : Optional[Any] = k[8:] else: _snake_case : List[str] = k if ".block." in k: _snake_case : List[str] = k_new.replace(""".block.""" , """.""" ) if ".conv." in k: _snake_case : Any = k_new.replace(""".conv.""" , """.convolution.""" ) if ".norm." in k: _snake_case : List[Any] = k_new.replace(""".norm.""" , """.normalization.""" ) if "conv_1." in k: _snake_case : Optional[Any] = k_new.replace("""conv_1.""" , F"{model_prefix}conv_stem." ) for i in [1, 2]: if F"layer_{i}." in k: _snake_case : Union[str, Any] = k_new.replace(F"layer_{i}." , F"{model_prefix}encoder.layer.{i-1}.layer." ) if ".exp_1x1." in k: _snake_case : Dict = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" ) if ".red_1x1." in k: _snake_case : List[str] = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" ) for i in [3, 4, 5]: if F"layer_{i}.0." in k: _snake_case : List[Any] = k_new.replace(F"layer_{i}.0." , F"{model_prefix}encoder.layer.{i-1}.downsampling_layer." ) if F"layer_{i}.1.local_rep.0." in k: _snake_case : Optional[Any] = k_new.replace(F"layer_{i}.1.local_rep.0." , F"{model_prefix}encoder.layer.{i-1}.conv_kxk." ) if F"layer_{i}.1.local_rep.1." in k: _snake_case : int = k_new.replace(F"layer_{i}.1.local_rep.1." , F"{model_prefix}encoder.layer.{i-1}.conv_1x1." ) for i in [3, 4, 5]: if i == 3: _snake_case : Optional[Any] = [0, 1] elif i == 4: _snake_case : Tuple = [0, 1, 2, 3] elif i == 5: _snake_case : Optional[int] = [0, 1, 2] for j in j_in: if F"layer_{i}.1.global_rep.{j}." in k: _snake_case : List[str] = k_new.replace( F"layer_{i}.1.global_rep.{j}." , F"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." ) if F"layer_{i}.1.global_rep.{j+1}." in k: _snake_case : Optional[Any] = k_new.replace( F"layer_{i}.1.global_rep.{j+1}." , F"{model_prefix}encoder.layer.{i-1}.layernorm." ) if F"layer_{i}.1.conv_proj." in k: _snake_case : Any = k_new.replace(F"layer_{i}.1.conv_proj." , F"{model_prefix}encoder.layer.{i-1}.conv_projection." ) if "pre_norm_attn.0." in k: _snake_case : int = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" ) if "pre_norm_attn.1." in k: _snake_case : str = k_new.replace("""pre_norm_attn.1.""" , """attention.""" ) if "pre_norm_ffn.0." in k: _snake_case : Tuple = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" ) if "pre_norm_ffn.1." in k: _snake_case : Optional[Any] = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" ) if "pre_norm_ffn.3." in k: _snake_case : str = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" ) if "classifier.1." in k: _snake_case : str = k_new.replace("""classifier.1.""" , """classifier.""" ) if "seg_head." in k: _snake_case : int = k_new.replace("""seg_head.""" , """segmentation_head.""" ) if ".aspp_layer." in k: _snake_case : str = k_new.replace(""".aspp_layer.""" , """.""" ) if ".aspp_pool." in k: _snake_case : List[Any] = k_new.replace(""".aspp_pool.""" , """.""" ) rename_keys.append((k, k_new) ) return rename_keys def UpperCAmelCase__ (snake_case__ : List[Any] ): """simple docstring""" _snake_case : Union[str, Any] = [] for k in state_dict.keys(): if k.startswith("""seg_head.aux_head.""" ): keys_to_ignore.append(__a ) for k in keys_to_ignore: state_dict.pop(__a , __a ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" _snake_case : Optional[int] = Image.open(requests.get(__a , stream=__a ).raw ) return im @torch.no_grad() def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Optional[int] ): """simple docstring""" _snake_case : str = get_mobilevitva_config(__a , __a ) # load original state_dict _snake_case : str = torch.load(__a , map_location="""cpu""" ) # load huggingface model if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ): _snake_case : Union[str, Any] = MobileViTVaForSemanticSegmentation(__a ).eval() _snake_case : List[Any] = False else: _snake_case : List[Any] = MobileViTVaForImageClassification(__a ).eval() _snake_case : Optional[int] = False # remove and rename some keys of load the original model _snake_case : Optional[Any] = checkpoint remove_unused_keys(__a ) _snake_case : List[str] = create_rename_keys(__a , base_model=__a ) for rename_key_src, rename_key_dest in rename_keys: rename_key(__a , __a , __a ) # load modified state_dict model.load_state_dict(__a ) # Check outputs on an image, prepared by MobileViTImageProcessor _snake_case : Any = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) _snake_case : Tuple = image_processor(images=prepare_img() , return_tensors="""pt""" ) _snake_case : List[Any] = model(**__a ) # verify classification model if task_name.startswith("""imagenet""" ): _snake_case : Union[str, Any] = outputs.logits _snake_case : List[Any] = logits.argmax(-1 ).item() print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] ) if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0: # expected_logits for base variant _snake_case : Any = torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1] ) assert torch.allclose(logits[0, :3] , __a , atol=1e-4 ) Path(__a ).mkdir(exist_ok=__a ) print(F"Saving model {task_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(__a ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(__a ) if __name__ == "__main__": A_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--task''', default='''imagenet1k_256''', type=str, help=( '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . ''' '''\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n ''' ), choices=[ '''imagenet1k_256''', '''imagenet1k_384''', '''imagenet21k_to_1k_256''', '''imagenet21k_to_1k_384''', '''ade20k_deeplabv3''', '''voc_deeplabv3''', ], ) parser.add_argument( '''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).''' ) parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) A_ = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
609
from datetime import datetime import matplotlib.pyplot as plt import torch def lowerCAmelCase_ ( __a ) -> Any: """simple docstring""" for param in module.parameters(): lowerCamelCase__: Tuple =False def lowerCAmelCase_ ( ) -> Optional[int]: """simple docstring""" lowerCamelCase__: List[str] ="cuda" if torch.cuda.is_available() else "cpu" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): lowerCamelCase__: str ="mps" if device == "mps": print( "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch" " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues" " with generations." ) return device def lowerCAmelCase_ ( __a ) -> List[str]: """simple docstring""" lowerCamelCase__: Union[str, Any] =plt.imshow(__a ) fig.axes.get_xaxis().set_visible(__a ) fig.axes.get_yaxis().set_visible(__a ) plt.show() def lowerCAmelCase_ ( ) -> Optional[Any]: """simple docstring""" lowerCamelCase__: List[str] =datetime.now() lowerCamelCase__: str =current_time.strftime("%H:%M:%S" ) return timestamp
59
0
import pytest from datasets.splits import SplitDict, SplitInfo from datasets.utils.py_utils import asdict @pytest.mark.parametrize( "split_dict" , [ SplitDict(), SplitDict({"train": SplitInfo(name="train" , num_bytes=1_3_3_7 , num_examples=4_2 , dataset_name="my_dataset" )} ), SplitDict({"train": SplitInfo(name="train" , num_bytes=1_3_3_7 , num_examples=4_2 )} ), SplitDict({"train": SplitInfo()} ), ] , ) def _snake_case ( lowerCAmelCase : Optional[int] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = split_dict._to_yaml_list() assert len(__a ) == len(__a ) SCREAMING_SNAKE_CASE_ : List[str] = SplitDict._from_yaml_list(__a ) for split_name, split_info in split_dict.items(): # dataset_name field is deprecated, and is therefore not part of the YAML dump SCREAMING_SNAKE_CASE_ : Any = None # the split name of split_dict takes over the name of the split info object SCREAMING_SNAKE_CASE_ : str = split_name assert split_dict == reloaded @pytest.mark.parametrize( "split_info" , [SplitInfo(), SplitInfo(dataset_name=__a ), SplitInfo(dataset_name="my_dataset" )] ) def _snake_case ( lowerCAmelCase : List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = asdict(SplitDict({"train": split_info} ) ) assert "dataset_name" in split_dict_asdict["train"] assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
216
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __A = { "configuration_pix2struct": [ "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Pix2StructConfig", "Pix2StructTextConfig", "Pix2StructVisionConfig", ], "processing_pix2struct": ["Pix2StructProcessor"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["Pix2StructImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST", "Pix2StructPreTrainedModel", "Pix2StructForConditionalGeneration", "Pix2StructVisionModel", "Pix2StructTextModel", ] if TYPE_CHECKING: from .configuration_pixastruct import ( PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP, PixaStructConfig, PixaStructTextConfig, PixaStructVisionConfig, ) from .processing_pixastruct import PixaStructProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_pixastruct import PixaStructImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pixastruct import ( PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST, PixaStructForConditionalGeneration, PixaStructPreTrainedModel, PixaStructTextModel, PixaStructVisionModel, ) else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
59
0
"""simple docstring""" def _UpperCamelCase ( ) -> str: """simple docstring""" __UpperCAmelCase : str = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] __UpperCAmelCase : List[str] = 6 __UpperCAmelCase : int = 1 __UpperCAmelCase : int = 1901 __UpperCAmelCase : List[str] = 0 while year < 2001: day += 7 if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0): if day > days_per_month[month - 1] and month != 2: month += 1 __UpperCAmelCase : int = day - days_per_month[month - 2] elif day > 29 and month == 2: month += 1 __UpperCAmelCase : Dict = day - 29 else: if day > days_per_month[month - 1]: month += 1 __UpperCAmelCase : Tuple = day - days_per_month[month - 2] if month > 12: year += 1 __UpperCAmelCase : Union[str, Any] = 1 if year < 2001 and day == 1: sundays += 1 return sundays if __name__ == "__main__": print(solution())
77
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer __A = logging.get_logger(__name__) __A = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} __A = { "vocab_file": { "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt", "distilbert-base-uncased-distilled-squad": ( "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt" ), "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt", "distilbert-base-cased-distilled-squad": ( "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt" ), "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt", "distilbert-base-multilingual-cased": ( "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt" ), }, "tokenizer_file": { "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json", "distilbert-base-uncased-distilled-squad": ( "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json" ), "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json", "distilbert-base-cased-distilled-squad": ( "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json" ), "distilbert-base-german-cased": ( "https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json" ), "distilbert-base-multilingual-cased": ( "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json" ), }, } __A = { "distilbert-base-uncased": 512, "distilbert-base-uncased-distilled-squad": 512, "distilbert-base-cased": 512, "distilbert-base-cased-distilled-squad": 512, "distilbert-base-german-cased": 512, "distilbert-base-multilingual-cased": 512, } __A = { "distilbert-base-uncased": {"do_lower_case": True}, "distilbert-base-uncased-distilled-squad": {"do_lower_case": True}, "distilbert-base-cased": {"do_lower_case": False}, "distilbert-base-cased-distilled-squad": {"do_lower_case": False}, "distilbert-base-german-cased": {"do_lower_case": False}, "distilbert-base-multilingual-cased": {"do_lower_case": False}, } class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = PRETRAINED_INIT_CONFIGURATION lowercase_ = ["input_ids", "attention_mask"] lowercase_ = DistilBertTokenizer def __init__(self : Tuple , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[Any]="[UNK]" , UpperCAmelCase_ : Dict="[SEP]" , UpperCAmelCase_ : Dict="[PAD]" , UpperCAmelCase_ : Optional[int]="[CLS]" , UpperCAmelCase_ : str="[MASK]" , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[str]=None , **UpperCAmelCase_ : List[str] , ) ->str: '''simple docstring''' super().__init__( UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , tokenize_chinese_chars=UpperCAmelCase_ , strip_accents=UpperCAmelCase_ , **UpperCAmelCase_ , ) lowerCamelCase__: Union[str, Any] =json.loads(self.backend_tokenizer.normalizer.__getstate__()) if ( normalizer_state.get("lowercase" , UpperCAmelCase_) != do_lower_case or normalizer_state.get("strip_accents" , UpperCAmelCase_) != strip_accents or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase_) != tokenize_chinese_chars ): lowerCamelCase__: List[str] =getattr(UpperCAmelCase_ , normalizer_state.pop("type")) lowerCamelCase__: Optional[int] =do_lower_case lowerCamelCase__: int =strip_accents lowerCamelCase__: Any =tokenize_chinese_chars lowerCamelCase__: Any =normalizer_class(**UpperCAmelCase_) lowerCamelCase__: str =do_lower_case def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any]=None) ->Dict: '''simple docstring''' lowerCamelCase__: str =[self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None) ->List[int]: '''simple docstring''' lowerCamelCase__: str =[self.sep_token_id] lowerCamelCase__: str =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None) ->Tuple[str]: '''simple docstring''' lowerCamelCase__: str =self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_) return tuple(UpperCAmelCase_)
59
0
"""simple docstring""" import numpy as np def _snake_case ( lowercase__ ): return 1 / (1 + np.exp(-vector )) def _snake_case ( lowercase__ ): return vector * sigmoid(1.7_0_2 * vector ) if __name__ == "__main__": import doctest doctest.testmod()
630
import operator as op def lowerCAmelCase_ ( __a ) -> Tuple: """simple docstring""" lowerCamelCase__: Optional[Any] =[] lowerCamelCase__: Tuple =lambda __a , __a : int(x / y ) # noqa: E731 integer division operation lowerCamelCase__: Tuple ={ "^": op.pow, "*": op.mul, "/": div, "+": op.add, "-": op.sub, } # operators & their respective operation # print table header print("Symbol".center(8 ) , "Action".center(12 ) , "Stack" , sep=" | " ) print("-" * (30 + len(__a )) ) for x in post_fix: if x.isdigit(): # if x in digit stack.append(__a ) # append x to stack # output in tabular format print(x.rjust(8 ) , ("push(" + x + ")").ljust(12 ) , ",".join(__a ) , sep=" | " ) else: lowerCamelCase__: List[Any] =stack.pop() # pop stack # output in tabular format print("".rjust(8 ) , ("pop(" + b + ")").ljust(12 ) , ",".join(__a ) , sep=" | " ) lowerCamelCase__: Optional[Any] =stack.pop() # pop stack # output in tabular format print("".rjust(8 ) , ("pop(" + a + ")").ljust(12 ) , ",".join(__a ) , sep=" | " ) stack.append( str(opr[x](int(__a ) , int(__a ) ) ) ) # evaluate the 2 values popped from stack & push result to stack # output in tabular format print( x.rjust(8 ) , ("push(" + a + x + b + ")").ljust(12 ) , ",".join(__a ) , sep=" | " , ) return int(stack[0] ) if __name__ == "__main__": __A = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ") print("\n\tResult = ", solve(Postfix))
59
0
import requests lowerCamelCase_ = """https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey=""" def lowerCamelCase ( a_ ) -> None: lowerCAmelCase_ = requests.get(_NEWS_API + bbc_news_api_key ).json() # each article in the list is a dict for i, article in enumerate(bbc_news_page['articles'] , 1 ): print(F'''{i}.) {article["title"]}''' ) if __name__ == "__main__": fetch_bbc_news(bbc_news_api_key="""<Your BBC News API key goes here>""")
318
from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING __A = logging.get_logger(__name__) @add_end_docstrings(__SCREAMING_SNAKE_CASE ) class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__(self : List[Any] , **UpperCAmelCase_ : Any) ->Any: '''simple docstring''' super().__init__(**UpperCAmelCase_) requires_backends(self , "vision") requires_backends(self , "torch") if self.framework != "pt": raise ValueError(F"""The {self.__class__} is only available in PyTorch.""") self.check_model_type(UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Tuple , **UpperCAmelCase_ : List[Any]) ->Tuple: '''simple docstring''' lowerCamelCase__: Optional[int] ={} lowerCamelCase__: Tuple ={} lowerCamelCase__: str ={} # preprocess args if "points_per_batch" in kwargs: lowerCamelCase__: Optional[Any] =kwargs["points_per_batch"] if "points_per_crop" in kwargs: lowerCamelCase__: int =kwargs["points_per_crop"] if "crops_n_layers" in kwargs: lowerCamelCase__: Any =kwargs["crops_n_layers"] if "crop_overlap_ratio" in kwargs: lowerCamelCase__: Tuple =kwargs["crop_overlap_ratio"] if "crop_n_points_downscale_factor" in kwargs: lowerCamelCase__: List[Any] =kwargs["crop_n_points_downscale_factor"] # postprocess args if "pred_iou_thresh" in kwargs: lowerCamelCase__: List[str] =kwargs["pred_iou_thresh"] if "stability_score_offset" in kwargs: lowerCamelCase__: int =kwargs["stability_score_offset"] if "mask_threshold" in kwargs: lowerCamelCase__: Optional[int] =kwargs["mask_threshold"] if "stability_score_thresh" in kwargs: lowerCamelCase__: str =kwargs["stability_score_thresh"] if "crops_nms_thresh" in kwargs: lowerCamelCase__: Any =kwargs["crops_nms_thresh"] if "output_rle_mask" in kwargs: lowerCamelCase__: List[Any] =kwargs["output_rle_mask"] if "output_bboxes_mask" in kwargs: lowerCamelCase__: List[str] =kwargs["output_bboxes_mask"] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__(self : int , UpperCAmelCase_ : Dict , *UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Optional[Any]=None , **UpperCAmelCase_ : Dict) ->Optional[Any]: '''simple docstring''' return super().__call__(UpperCAmelCase_ , *UpperCAmelCase_ , num_workers=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , **UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any]=64 , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : float = 512 / 1_500 , UpperCAmelCase_ : Optional[int] = 32 , UpperCAmelCase_ : Optional[int] = 1 , ) ->Dict: '''simple docstring''' lowerCamelCase__: Dict =load_image(UpperCAmelCase_) lowerCamelCase__: List[str] =self.image_processor.size["longest_edge"] lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Union[str, Any] =self.image_processor.generate_crop_boxes( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: str =self.image_processor(images=UpperCAmelCase_ , return_tensors="pt") with self.device_placement(): if self.framework == "pt": lowerCamelCase__: str =self.get_inference_context() with inference_context(): lowerCamelCase__: Union[str, Any] =self._ensure_tensor_on_device(UpperCAmelCase_ , device=self.device) lowerCamelCase__: Optional[Any] =self.model.get_image_embeddings(model_inputs.pop("pixel_values")) lowerCamelCase__: str =image_embeddings lowerCamelCase__: int =grid_points.shape[1] lowerCamelCase__: int =points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. " "To return all points at once, set points_per_batch to None") for i in range(0 , UpperCAmelCase_ , UpperCAmelCase_): lowerCamelCase__: int =grid_points[:, i : i + points_per_batch, :, :] lowerCamelCase__: Optional[Any] =input_labels[:, i : i + points_per_batch] lowerCamelCase__: Dict =i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict=0.88 , UpperCAmelCase_ : Optional[Any]=0.95 , UpperCAmelCase_ : Tuple=0 , UpperCAmelCase_ : Any=1 , ) ->Optional[Any]: '''simple docstring''' lowerCamelCase__: Any =model_inputs.pop("input_boxes") lowerCamelCase__: Dict =model_inputs.pop("is_last") lowerCamelCase__: int =model_inputs.pop("original_sizes").tolist() lowerCamelCase__: Union[str, Any] =model_inputs.pop("reshaped_input_sizes").tolist() lowerCamelCase__: Union[str, Any] =self.model(**UpperCAmelCase_) # post processing happens here in order to avoid CPU GPU copies of ALL the masks lowerCamelCase__: Optional[int] =model_outputs["pred_masks"] lowerCamelCase__: Union[str, Any] =self.image_processor.post_process_masks( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , binarize=UpperCAmelCase_) lowerCamelCase__: Optional[Any] =model_outputs["iou_scores"] lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Any =self.image_processor.filter_masks( masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Optional[int]=0.7 , ) ->Tuple: '''simple docstring''' lowerCamelCase__: Any =[] lowerCamelCase__: Optional[int] =[] lowerCamelCase__: List[str] =[] for model_output in model_outputs: all_scores.append(model_output.pop("iou_scores")) all_masks.extend(model_output.pop("masks")) all_boxes.append(model_output.pop("boxes")) lowerCamelCase__: str =torch.cat(UpperCAmelCase_) lowerCamelCase__: List[str] =torch.cat(UpperCAmelCase_) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Dict =self.image_processor.post_process_for_mask_generation( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: List[str] =defaultdict(UpperCAmelCase_) for output in model_outputs: for k, v in output.items(): extra[k].append(UpperCAmelCase_) lowerCamelCase__: Any ={} if output_rle_mask: lowerCamelCase__: Union[str, Any] =rle_mask if output_bboxes_mask: lowerCamelCase__: int =bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
59
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A : Union[str, Any] = logging.get_logger(__name__) __A : Dict = { '''edbeeching/decision-transformer-gym-hopper-medium''': ( '''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json''' ), # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer } class lowerCamelCase ( __SCREAMING_SNAKE_CASE ): lowercase : List[str] = 'decision_transformer' lowercase : Optional[int] = ['past_key_values'] lowercase : Tuple = { 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self , SCREAMING_SNAKE_CASE_=17 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=128 , SCREAMING_SNAKE_CASE_=4096 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=1024 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_="relu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=1e-5 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=5_0256 , SCREAMING_SNAKE_CASE_=5_0256 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ , ): UpperCamelCase : List[str] = state_dim UpperCamelCase : str = act_dim UpperCamelCase : Optional[Any] = hidden_size UpperCamelCase : Any = max_ep_len UpperCamelCase : Any = action_tanh UpperCamelCase : List[str] = vocab_size UpperCamelCase : Optional[int] = n_positions UpperCamelCase : str = n_layer UpperCamelCase : List[Any] = n_head UpperCamelCase : List[Any] = n_inner UpperCamelCase : str = activation_function UpperCamelCase : str = resid_pdrop UpperCamelCase : Any = embd_pdrop UpperCamelCase : str = attn_pdrop UpperCamelCase : Tuple = layer_norm_epsilon UpperCamelCase : Optional[int] = initializer_range UpperCamelCase : str = scale_attn_weights UpperCamelCase : Optional[Any] = use_cache UpperCamelCase : Any = scale_attn_by_inverse_layer_idx UpperCamelCase : List[Any] = reorder_and_upcast_attn UpperCamelCase : Tuple = bos_token_id UpperCamelCase : List[Any] = eos_token_id super().__init__(bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ )
499
from transformers import BertTokenizerFast from .custom_tokenization import CustomTokenizer class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = CustomTokenizer pass
59
0
"""simple docstring""" def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict = 10 ): if not isinstance(__a , __a ) or n < 0: raise ValueError('Invalid input' ) lowerCAmelCase = 10**n lowerCAmelCase = 2_8433 * (pow(2 , 783_0457 , __a )) + 1 return str(number % modulus ) if __name__ == "__main__": from doctest import testmod testmod() print(f'''{solution(10) = }''')
4
import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE_ (self : Dict) ->Optional[int]: '''simple docstring''' lowerCamelCase__: List[Any] =inspect.getfile(accelerate.test_utils) lowerCamelCase__: List[Any] =os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"]) lowerCamelCase__: Any =os.path.sep.join( mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]) lowerCamelCase__: Tuple =os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"]) @require_multi_gpu def SCREAMING_SNAKE_CASE_ (self : str) ->str: '''simple docstring''' print(F"""Found {torch.cuda.device_count()} devices.""") lowerCamelCase__: Union[str, Any] =["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path] with patch_environment(omp_num_threads=1): execute_subprocess_async(UpperCAmelCase_ , env=os.environ.copy()) @require_multi_gpu def SCREAMING_SNAKE_CASE_ (self : List[str]) ->List[Any]: '''simple docstring''' print(F"""Found {torch.cuda.device_count()} devices.""") lowerCamelCase__: Dict =["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path] print(F"""Command: {cmd}""") with patch_environment(omp_num_threads=1): execute_subprocess_async(UpperCAmelCase_ , env=os.environ.copy()) @require_multi_gpu def SCREAMING_SNAKE_CASE_ (self : Dict) ->Tuple: '''simple docstring''' lowerCamelCase__: int =["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__)] with patch_environment(omp_num_threads=1): execute_subprocess_async(UpperCAmelCase_ , env=os.environ.copy()) @require_multi_gpu def SCREAMING_SNAKE_CASE_ (self : str) ->List[Any]: '''simple docstring''' print(F"""Found {torch.cuda.device_count()} devices, using 2 devices only""") lowerCamelCase__: int =["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1"): execute_subprocess_async(UpperCAmelCase_ , env=os.environ.copy()) if __name__ == "__main__": __A = Accelerator() __A = (accelerator.state.process_index + 2, 10) __A = torch.randint(0, 10, shape).to(accelerator.device) __A = "" __A = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." __A = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." __A = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
59
0
from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A ={ '''configuration_informer''': [ '''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''InformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A =[ '''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''InformerForPrediction''', '''InformerModel''', '''InformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_informer import ( INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, InformerForPrediction, InformerModel, InformerPreTrainedModel, ) else: import sys __A =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
463
from typing import List, Optional, Tuple, Union import PIL import torch from torchvision import transforms from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput from diffusers.schedulers import DDIMScheduler from diffusers.utils import randn_tensor __A = transforms.Compose( [ transforms.Resize((256, 256)), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def lowerCAmelCase_ ( __a ) -> str: """simple docstring""" if isinstance(__a , torch.Tensor ): return image elif isinstance(__a , PIL.Image.Image ): lowerCamelCase__: Any =[image] lowerCamelCase__: Optional[Any] =[trans(img.convert("RGB" ) ) for img in image] lowerCamelCase__: Dict =torch.stack(__a ) return image class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__(self : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple) ->int: '''simple docstring''' super().__init__() # make sure scheduler can always be converted to DDIM lowerCamelCase__: Tuple =DDIMScheduler.from_config(scheduler.config) self.register_modules(unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Union[str, Any]) ->Dict: '''simple docstring''' if strength < 0 or strength > 1: raise ValueError(F"""The value of strength should in [0.0, 1.0] but is {strength}""") def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple) ->Tuple: '''simple docstring''' lowerCamelCase__: int =min(int(num_inference_steps * strength) , UpperCAmelCase_) lowerCamelCase__: str =max(num_inference_steps - init_timestep , 0) lowerCamelCase__: int =self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int]=None) ->Optional[int]: '''simple docstring''' if not isinstance(UpperCAmelCase_ , (torch.Tensor, PIL.Image.Image, list)): raise ValueError( F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(UpperCAmelCase_)}""") lowerCamelCase__: Optional[int] =image.to(device=UpperCAmelCase_ , dtype=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) and len(UpperCAmelCase_) != batch_size: raise ValueError( F"""You have passed a list of generators of length {len(UpperCAmelCase_)}, but requested an effective batch""" F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""") lowerCamelCase__: Dict =init_latents.shape lowerCamelCase__: int =randn_tensor(UpperCAmelCase_ , generator=UpperCAmelCase_ , device=UpperCAmelCase_ , dtype=UpperCAmelCase_) # get latents print("add noise to latents at timestep" , UpperCAmelCase_) lowerCamelCase__: Union[str, Any] =self.scheduler.add_noise(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: int =init_latents return latents @torch.no_grad() def __call__(self : Tuple , UpperCAmelCase_ : Union[torch.FloatTensor, PIL.Image.Image] = None , UpperCAmelCase_ : float = 0.8 , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : int = 50 , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , ) ->Union[ImagePipelineOutput, Tuple]: '''simple docstring''' self.check_inputs(UpperCAmelCase_) # 2. Preprocess image lowerCamelCase__: Dict =preprocess(UpperCAmelCase_) # 3. set timesteps self.scheduler.set_timesteps(UpperCAmelCase_ , device=self.device) lowerCamelCase__ , lowerCamelCase__: str =self.get_timesteps(UpperCAmelCase_ , UpperCAmelCase_ , self.device) lowerCamelCase__: Optional[int] =timesteps[:1].repeat(UpperCAmelCase_) # 4. Prepare latent variables lowerCamelCase__: int =self.prepare_latents(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , self.unet.dtype , self.device , UpperCAmelCase_) lowerCamelCase__: Tuple =latents # 5. Denoising loop for t in self.progress_bar(UpperCAmelCase_): # 1. predict noise model_output lowerCamelCase__: Dict =self.unet(UpperCAmelCase_ , UpperCAmelCase_).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 lowerCamelCase__: Optional[int] =self.scheduler.step( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , eta=UpperCAmelCase_ , use_clipped_model_output=UpperCAmelCase_ , generator=UpperCAmelCase_ , ).prev_sample lowerCamelCase__: str =(image / 2 + 0.5).clamp(0 , 1) lowerCamelCase__: Optional[Any] =image.cpu().permute(0 , 2 , 3 , 1).numpy() if output_type == "pil": lowerCamelCase__: Dict =self.numpy_to_pil(UpperCAmelCase_) if not return_dict: return (image, latent_timestep.item()) return ImagePipelineOutput(images=UpperCAmelCase_)
59
0
"""simple docstring""" import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class lowerCamelCase__ ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> str: """simple docstring""" lowercase : Union[str, Any] = "ylacombe/bark-small" lowercase : Tuple = tempfile.mkdtemp() lowercase : Tuple = "en_speaker_1" lowercase : Optional[int] = "This is a test string" lowercase : List[str] = "speaker_embeddings_path.json" lowercase : int = "speaker_embeddings" def _UpperCAmelCase ( self , **snake_case ) -> Tuple: """simple docstring""" return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase_ ) def _UpperCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" shutil.rmtree(self.tmpdirname ) def _UpperCAmelCase ( self ) -> Any: """simple docstring""" lowercase : List[Any] = self.get_tokenizer() lowercase : List[str] = BarkProcessor(tokenizer=UpperCAmelCase_ ) processor.save_pretrained(self.tmpdirname ) lowercase : Dict = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def _UpperCAmelCase ( self ) -> Tuple: """simple docstring""" lowercase : Tuple = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) lowercase : Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) lowercase : Any = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def _UpperCAmelCase ( self ) -> int: """simple docstring""" lowercase : Any = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) lowercase : List[str] = 3_5 lowercase : Optional[Any] = 2 lowercase : Optional[Any] = 8 lowercase : Optional[int] = { "semantic_prompt": np.ones(UpperCAmelCase_ ), "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ), "fine_prompt": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset lowercase : Any = processor(text=self.input_string , voice_preset=UpperCAmelCase_ ) lowercase : int = inputs["history_prompt"] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from npz file lowercase : Union[str, Any] = os.path.join(self.tmpdirname , """file.npz""" ) np.savez(UpperCAmelCase_ , **UpperCAmelCase_ ) lowercase : Tuple = processor(text=self.input_string , voice_preset=UpperCAmelCase_ ) lowercase : Optional[Any] = inputs["history_prompt"] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from the hub lowercase : Any = processor(text=self.input_string , voice_preset=self.voice_preset ) def _UpperCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" lowercase : str = self.get_tokenizer() lowercase : Dict = BarkProcessor(tokenizer=UpperCAmelCase_ ) lowercase : List[Any] = processor(text=self.input_string ) lowercase : Optional[int] = tokenizer( self.input_string , padding="""max_length""" , max_length=2_5_6 , add_special_tokens=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
607
import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 __A = data_utils.TransfoXLTokenizer __A = data_utils.TransfoXLCorpus __A = data_utils __A = data_utils def lowerCAmelCase_ ( __a , __a , __a , __a ) -> List[str]: """simple docstring""" if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(__a , "rb" ) as fp: lowerCamelCase__: Optional[Any] =pickle.load(__a , encoding="latin1" ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) lowerCamelCase__: Union[str, Any] =pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"] print(F"""Save vocabulary to {pytorch_vocab_dump_path}""" ) lowerCamelCase__: Any =corpus.vocab.__dict__ torch.save(__a , __a ) lowerCamelCase__: Dict =corpus.__dict__ corpus_dict_no_vocab.pop("vocab" , __a ) lowerCamelCase__: List[str] =pytorch_dump_folder_path + "/" + CORPUS_NAME print(F"""Save dataset to {pytorch_dataset_dump_path}""" ) torch.save(__a , __a ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model lowerCamelCase__: Optional[Any] =os.path.abspath(__a ) lowerCamelCase__: Dict =os.path.abspath(__a ) print(F"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" ) # Initialise PyTorch model if transfo_xl_config_file == "": lowerCamelCase__: int =TransfoXLConfig() else: lowerCamelCase__: Any =TransfoXLConfig.from_json_file(__a ) print(F"""Building PyTorch model from configuration: {config}""" ) lowerCamelCase__: List[Any] =TransfoXLLMHeadModel(__a ) lowerCamelCase__: List[str] =load_tf_weights_in_transfo_xl(__a , __a , __a ) # Save pytorch-model lowerCamelCase__: List[str] =os.path.join(__a , __a ) lowerCamelCase__: Tuple =os.path.join(__a , __a ) print(F"""Save PyTorch model to {os.path.abspath(__a )}""" ) torch.save(model.state_dict() , __a ) print(F"""Save configuration file to {os.path.abspath(__a )}""" ) with open(__a , "w" , encoding="utf-8" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the folder to store the PyTorch model or dataset/vocab.", ) parser.add_argument( "--tf_checkpoint_path", default="", type=str, help="An optional path to a TensorFlow checkpoint path to be converted.", ) parser.add_argument( "--transfo_xl_config_file", default="", type=str, help=( "An optional config json file corresponding to the pre-trained BERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--transfo_xl_dataset_file", default="", type=str, help="An optional dataset file to be converted in a vocabulary.", ) __A = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
59
0
import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : List[str] = AutoencoderKL UpperCAmelCase : Any = """sample""" UpperCAmelCase : Tuple = 1E-2 @property def _lowercase ( self : int ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ = 4 SCREAMING_SNAKE_CASE_ = 3 SCREAMING_SNAKE_CASE_ = (32, 32) SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase_ ) return {"sample": image} @property def _lowercase ( self : List[str] ) -> Optional[int]: """simple docstring""" return (3, 32, 32) @property def _lowercase ( self : Optional[Any] ) -> List[str]: """simple docstring""" return (3, 32, 32) def _lowercase ( self : Union[str, Any] ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE_ = { "block_out_channels": [32, 64], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 4, } SCREAMING_SNAKE_CASE_ = self.dummy_input return init_dict, inputs_dict def _lowercase ( self : Optional[Any] ) -> List[Any]: """simple docstring""" pass def _lowercase ( self : Union[str, Any] ) -> str: """simple docstring""" pass @unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''' ) def _lowercase ( self : Tuple ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE_ = self.prepare_init_args_and_inputs_for_common() SCREAMING_SNAKE_CASE_ = self.model_class(**UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) assert not model.is_gradient_checkpointing and model.training SCREAMING_SNAKE_CASE_ = model(**UpperCAmelCase_ ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() SCREAMING_SNAKE_CASE_ = torch.randn_like(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE_ = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing SCREAMING_SNAKE_CASE_ = self.model_class(**UpperCAmelCase_ ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(UpperCAmelCase_ ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training SCREAMING_SNAKE_CASE_ = model_a(**UpperCAmelCase_ ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() SCREAMING_SNAKE_CASE_ = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1e-5 ) SCREAMING_SNAKE_CASE_ = dict(model.named_parameters() ) SCREAMING_SNAKE_CASE_ = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) ) def _lowercase ( self : Any ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' , output_loading_info=UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 ) model.to(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE_ = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def _lowercase ( self : Optional[Any] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' ) SCREAMING_SNAKE_CASE_ = model.to(UpperCAmelCase_ ) model.eval() if torch_device == "mps": SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 ) else: SCREAMING_SNAKE_CASE_ = torch.Generator(device=UpperCAmelCase_ ).manual_seed(0 ) SCREAMING_SNAKE_CASE_ = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) SCREAMING_SNAKE_CASE_ = image.to(UpperCAmelCase_ ) with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(UpperCAmelCase_ , sample_posterior=UpperCAmelCase_ , generator=UpperCAmelCase_ ).sample SCREAMING_SNAKE_CASE_ = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. if torch_device == "mps": SCREAMING_SNAKE_CASE_ = torch.tensor( [ -4.0078e-01, -3.8323e-04, -1.2681e-01, -1.1462e-01, 2.0095e-01, 1.0893e-01, -8.8247e-02, -3.0361e-01, -9.8644e-03, ] ) elif torch_device == "cpu": SCREAMING_SNAKE_CASE_ = torch.tensor( [-0.1_352, 0.0_878, 0.0_419, -0.0_818, -0.1_069, 0.0_688, -0.1_458, -0.4_446, -0.0_026] ) else: SCREAMING_SNAKE_CASE_ = torch.tensor( [-0.2_421, 0.4_642, 0.2_507, -0.0_438, 0.0_682, 0.3_160, -0.2_018, -0.0_727, 0.2_485] ) self.assertTrue(torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , rtol=1e-2 ) ) @slow class snake_case ( unittest.TestCase ): '''simple docstring''' def _lowercase ( self : Dict , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] ) -> Union[str, Any]: """simple docstring""" return F'''gaussian_noise_s={seed}_shape={'_'.join([str(UpperCAmelCase_ ) for s in shape] )}.npy''' def _lowercase ( self : List[Any] ) -> List[Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase ( self : int , lowerCAmelCase_ : Optional[int]=0 , lowerCAmelCase_ : Any=(4, 3, 512, 512) , lowerCAmelCase_ : int=False ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE_ = torch.floataa if fpaa else torch.floataa SCREAMING_SNAKE_CASE_ = torch.from_numpy(load_hf_numpy(self.get_file_format(UpperCAmelCase_ , UpperCAmelCase_ ) ) ).to(UpperCAmelCase_ ).to(UpperCAmelCase_ ) return image def _lowercase ( self : Optional[int] , lowerCAmelCase_ : List[Any]="CompVis/stable-diffusion-v1-4" , lowerCAmelCase_ : Any=False ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ = "fp16" if fpaa else None SCREAMING_SNAKE_CASE_ = torch.floataa if fpaa else torch.floataa SCREAMING_SNAKE_CASE_ = AutoencoderKL.from_pretrained( UpperCAmelCase_ , subfolder='''vae''' , torch_dtype=UpperCAmelCase_ , revision=UpperCAmelCase_ , ) model.to(UpperCAmelCase_ ).eval() return model def _lowercase ( self : str , lowerCAmelCase_ : Optional[int]=0 ) -> List[str]: """simple docstring""" if torch_device == "mps": return torch.manual_seed(UpperCAmelCase_ ) return torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ ) @parameterized.expand( [ # fmt: off [33, [-0.1_603, 0.9_878, -0.0_495, -0.0_790, -0.2_709, 0.8_375, -0.2_060, -0.0_824], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]], [47, [-0.2_376, 0.1_168, 0.1_332, -0.4_840, -0.2_508, -0.0_791, -0.0_493, -0.4_089], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]], # fmt: on ] ) def _lowercase ( self : int , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : int ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ = self.get_sd_vae_model() SCREAMING_SNAKE_CASE_ = self.get_sd_image(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE_ = self.get_generator(UpperCAmelCase_ ) with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(UpperCAmelCase_ , generator=UpperCAmelCase_ , sample_posterior=UpperCAmelCase_ ).sample assert sample.shape == image.shape SCREAMING_SNAKE_CASE_ = sample[-1, -2:, -2:, :2].flatten().float().cpu() SCREAMING_SNAKE_CASE_ = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice ) assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=3e-3 ) @parameterized.expand( [ # fmt: off [33, [-0.0_513, 0.0_289, 1.3_799, 0.2_166, -0.2_573, -0.0_871, 0.5_103, -0.0_999]], [47, [-0.4_128, -0.1_320, -0.3_704, 0.1_965, -0.4_116, -0.2_332, -0.3_340, 0.2_247]], # fmt: on ] ) @require_torch_gpu def _lowercase ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ = self.get_sd_vae_model(fpaa=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE_ = self.get_sd_image(UpperCAmelCase_ , fpaa=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE_ = self.get_generator(UpperCAmelCase_ ) with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(UpperCAmelCase_ , generator=UpperCAmelCase_ , sample_posterior=UpperCAmelCase_ ).sample assert sample.shape == image.shape SCREAMING_SNAKE_CASE_ = sample[-1, -2:, :2, -2:].flatten().float().cpu() SCREAMING_SNAKE_CASE_ = torch.tensor(UpperCAmelCase_ ) assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.1_609, 0.9_866, -0.0_487, -0.0_777, -0.2_716, 0.8_368, -0.2_055, -0.0_814], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]], [47, [-0.2_377, 0.1_147, 0.1_333, -0.4_841, -0.2_506, -0.0_805, -0.0_491, -0.4_085], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]], # fmt: on ] ) def _lowercase ( self : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Any ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE_ = self.get_sd_vae_model() SCREAMING_SNAKE_CASE_ = self.get_sd_image(UpperCAmelCase_ ) with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(UpperCAmelCase_ ).sample assert sample.shape == image.shape SCREAMING_SNAKE_CASE_ = sample[-1, -2:, -2:, :2].flatten().float().cpu() SCREAMING_SNAKE_CASE_ = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice ) assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=3e-3 ) @parameterized.expand( [ # fmt: off [13, [-0.2_051, -0.1_803, -0.2_311, -0.2_114, -0.3_292, -0.3_574, -0.2_953, -0.3_323]], [37, [-0.2_632, -0.2_625, -0.2_199, -0.2_741, -0.4_539, -0.4_990, -0.3_720, -0.4_925]], # fmt: on ] ) @require_torch_gpu def _lowercase ( self : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ = self.get_sd_vae_model() SCREAMING_SNAKE_CASE_ = self.get_sd_image(UpperCAmelCase_ , shape=(3, 4, 64, 64) ) with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model.decode(UpperCAmelCase_ ).sample assert list(sample.shape ) == [3, 3, 512, 512] SCREAMING_SNAKE_CASE_ = sample[-1, -2:, :2, -2:].flatten().cpu() SCREAMING_SNAKE_CASE_ = torch.tensor(UpperCAmelCase_ ) assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=1e-3 ) @parameterized.expand( [ # fmt: off [27, [-0.0_369, 0.0_207, -0.0_776, -0.0_682, -0.1_747, -0.1_930, -0.1_465, -0.2_039]], [16, [-0.1_628, -0.2_134, -0.2_747, -0.2_642, -0.3_774, -0.4_404, -0.3_687, -0.4_277]], # fmt: on ] ) @require_torch_gpu def _lowercase ( self : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Tuple ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ = self.get_sd_vae_model(fpaa=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE_ = self.get_sd_image(UpperCAmelCase_ , shape=(3, 4, 64, 64) , fpaa=UpperCAmelCase_ ) with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model.decode(UpperCAmelCase_ ).sample assert list(sample.shape ) == [3, 3, 512, 512] SCREAMING_SNAKE_CASE_ = sample[-1, -2:, :2, -2:].flatten().float().cpu() SCREAMING_SNAKE_CASE_ = torch.tensor(UpperCAmelCase_ ) assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=5e-3 ) @parameterized.expand([(13,), (16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' ) def _lowercase ( self : List[Any] , lowerCAmelCase_ : str ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ = self.get_sd_vae_model(fpaa=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE_ = self.get_sd_image(UpperCAmelCase_ , shape=(3, 4, 64, 64) , fpaa=UpperCAmelCase_ ) with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model.decode(UpperCAmelCase_ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model.decode(UpperCAmelCase_ ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=1e-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' ) def _lowercase ( self : Dict , lowerCAmelCase_ : Dict ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE_ = self.get_sd_vae_model() SCREAMING_SNAKE_CASE_ = self.get_sd_image(UpperCAmelCase_ , shape=(3, 4, 64, 64) ) with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model.decode(UpperCAmelCase_ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model.decode(UpperCAmelCase_ ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.3_001, 0.0_918, -2.6_984, -3.9_720, -3.2_099, -5.0_353, 1.7_338, -0.2_065, 3.4_267]], [47, [-1.5_030, -4.3_871, -6.0_355, -9.1_157, -1.6_661, -2.7_853, 2.1_607, -5.0_823, 2.5_633]], # fmt: on ] ) def _lowercase ( self : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE_ = self.get_sd_vae_model() SCREAMING_SNAKE_CASE_ = self.get_sd_image(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE_ = self.get_generator(UpperCAmelCase_ ) with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model.encode(UpperCAmelCase_ ).latent_dist SCREAMING_SNAKE_CASE_ = dist.sample(generator=UpperCAmelCase_ ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] SCREAMING_SNAKE_CASE_ = sample[0, -1, -3:, -3:].flatten().cpu() SCREAMING_SNAKE_CASE_ = torch.tensor(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE_ = 3e-3 if torch_device != "mps" else 1e-2 assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=UpperCAmelCase_ )
393
from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax __A = logging.get_logger(__name__) @add_end_docstrings(__SCREAMING_SNAKE_CASE ) class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__(self : Optional[int] , **UpperCAmelCase_ : List[Any]) ->List[str]: '''simple docstring''' super().__init__(**UpperCAmelCase_) requires_backends(self , "vision") self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == "tf" else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING) def __call__(self : List[str] , UpperCAmelCase_ : Union[str, List[str], "Image", List["Image"]] , **UpperCAmelCase_ : List[Any]) ->Tuple: '''simple docstring''' return super().__call__(UpperCAmelCase_ , **UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[Any] , **UpperCAmelCase_ : Optional[int]) ->Any: '''simple docstring''' lowerCamelCase__: Optional[int] ={} if "candidate_labels" in kwargs: lowerCamelCase__: Tuple =kwargs["candidate_labels"] if "hypothesis_template" in kwargs: lowerCamelCase__: Tuple =kwargs["hypothesis_template"] return preprocess_params, {}, {} def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Optional[Any]="This is a photo of {}.") ->str: '''simple docstring''' lowerCamelCase__: int =load_image(UpperCAmelCase_) lowerCamelCase__: Any =self.image_processor(images=[image] , return_tensors=self.framework) lowerCamelCase__: Any =candidate_labels lowerCamelCase__: List[str] =[hypothesis_template.format(UpperCAmelCase_) for x in candidate_labels] lowerCamelCase__: int =self.tokenizer(UpperCAmelCase_ , return_tensors=self.framework , padding=UpperCAmelCase_) lowerCamelCase__: str =[text_inputs] return inputs def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Any) ->Tuple: '''simple docstring''' lowerCamelCase__: int =model_inputs.pop("candidate_labels") lowerCamelCase__: List[str] =model_inputs.pop("text_inputs") if isinstance(text_inputs[0] , UpperCAmelCase_): lowerCamelCase__: List[Any] =text_inputs[0] else: # Batching case. lowerCamelCase__: List[Any] =text_inputs[0][0] lowerCamelCase__: List[str] =self.model(**UpperCAmelCase_ , **UpperCAmelCase_) lowerCamelCase__: str ={ "candidate_labels": candidate_labels, "logits": outputs.logits_per_image, } return model_outputs def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : Union[str, Any]) ->int: '''simple docstring''' lowerCamelCase__: List[Any] =model_outputs.pop("candidate_labels") lowerCamelCase__: Optional[int] =model_outputs["logits"][0] if self.framework == "pt": lowerCamelCase__: Optional[Any] =logits.softmax(dim=-1).squeeze(-1) lowerCamelCase__: Optional[Any] =probs.tolist() if not isinstance(UpperCAmelCase_ , UpperCAmelCase_): lowerCamelCase__: Optional[int] =[scores] elif self.framework == "tf": lowerCamelCase__: List[str] =stable_softmax(UpperCAmelCase_ , axis=-1) lowerCamelCase__: Optional[int] =probs.numpy().tolist() else: raise ValueError(F"""Unsupported framework: {self.framework}""") lowerCamelCase__: Optional[int] =[ {"score": score, "label": candidate_label} for score, candidate_label in sorted(zip(UpperCAmelCase_ , UpperCAmelCase_) , key=lambda UpperCAmelCase_: -x[0]) ] return result
59
0
'''simple docstring''' import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __init__( self : int , UpperCAmelCase : Dict , UpperCAmelCase : Tuple=3 , UpperCAmelCase : Dict=32 , UpperCAmelCase : Union[str, Any]=3 , UpperCAmelCase : Optional[Any]=10 , UpperCAmelCase : Optional[int]=[10, 20, 30, 40] , UpperCAmelCase : Optional[Any]=[1, 1, 2, 1] , UpperCAmelCase : int=True , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : Optional[int]="relu" , UpperCAmelCase : Any=3 , UpperCAmelCase : Optional[Any]=None , ) -> str: '''simple docstring''' lowercase : str =parent lowercase : List[Any] =batch_size lowercase : List[str] =image_size lowercase : str =num_channels lowercase : int =embeddings_size lowercase : List[Any] =hidden_sizes lowercase : str =depths lowercase : List[Any] =is_training lowercase : Tuple =use_labels lowercase : Dict =hidden_act lowercase : Optional[int] =num_labels lowercase : Optional[Any] =scope lowercase : List[str] =len(UpperCAmelCase_ ) def A__ ( self : Dict ) -> Tuple: '''simple docstring''' lowercase : Optional[Any] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase : Any =self.get_config() return config, pixel_values def A__ ( self : List[str] ) -> str: '''simple docstring''' return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def A__ ( self : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Dict ) -> Union[str, Any]: '''simple docstring''' lowercase : List[Any] =FlaxRegNetModel(config=UpperCAmelCase_ ) lowercase : Dict =model(UpperCAmelCase_ ) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def A__ ( self : List[str] , UpperCAmelCase : int , UpperCAmelCase : Optional[int] ) -> Any: '''simple docstring''' lowercase : str =self.num_labels lowercase : int =FlaxRegNetForImageClassification(config=UpperCAmelCase_ ) lowercase : Optional[Any] =model(UpperCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A__ ( self : List[str] ) -> int: '''simple docstring''' lowercase : Union[str, Any] =self.prepare_config_and_inputs() lowercase : Union[str, Any] =config_and_inputs lowercase : Union[str, Any] ={"pixel_values": pixel_values} return config, inputs_dict @require_flax class UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): """simple docstring""" UpperCamelCase_ = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () UpperCamelCase_ = False UpperCamelCase_ = False UpperCamelCase_ = False def A__ ( self : Union[str, Any] ) -> None: '''simple docstring''' lowercase : List[Any] =FlaxRegNetModelTester(self ) lowercase : List[Any] =ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ ) def A__ ( self : int ) -> int: '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A__ ( self : Optional[int] ) -> int: '''simple docstring''' return def A__ ( self : Optional[int] ) -> Dict: '''simple docstring''' lowercase : Tuple =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase_ ) def A__ ( self : List[str] ) -> int: '''simple docstring''' lowercase : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ ) @unittest.skip(reason='''RegNet does not use inputs_embeds''' ) def A__ ( self : Optional[int] ) -> str: '''simple docstring''' pass @unittest.skip(reason='''RegNet does not support input and output embeddings''' ) def A__ ( self : Optional[Any] ) -> Dict: '''simple docstring''' pass def A__ ( self : Union[str, Any] ) -> List[str]: '''simple docstring''' lowercase : List[Any] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase : Any =model_class(UpperCAmelCase_ ) lowercase : List[Any] =inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase : str =[*signature.parameters.keys()] lowercase : Tuple =["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCAmelCase_ ) def A__ ( self : int ) -> Optional[int]: '''simple docstring''' def check_hidden_states_output(UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any ): lowercase : Optional[int] =model_class(UpperCAmelCase_ ) lowercase : Optional[int] =model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) ) lowercase : int =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase : Optional[int] =self.model_tester.num_stages self.assertEqual(len(UpperCAmelCase_ ) , expected_num_stages + 1 ) lowercase : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase : List[str] =True check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase : Any =True check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) def A__ ( self : Dict ) -> Optional[int]: '''simple docstring''' lowercase : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowercase : int =self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) lowercase : Dict =model_class(UpperCAmelCase_ ) @jax.jit def model_jitted(UpperCAmelCase : List[Any] , **UpperCAmelCase : str ): return model(pixel_values=UpperCAmelCase_ , **UpperCAmelCase_ ) with self.subTest('''JIT Enabled''' ): lowercase : Optional[Any] =model_jitted(**UpperCAmelCase_ ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): lowercase : Union[str, Any] =model_jitted(**UpperCAmelCase_ ).to_tuple() self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) ) for jitted_output, output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assertEqual(jitted_output.shape , output.shape ) def lowercase_ ( ) -> Any: """simple docstring""" lowercase : Any =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_flax class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" @cached_property def A__ ( self : List[Any] ) -> int: '''simple docstring''' return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None @slow def A__ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' lowercase : Any =FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' ) lowercase : Optional[int] =self.default_image_processor lowercase : Any =prepare_img() lowercase : Any =image_processor(images=UpperCAmelCase_ , return_tensors='''np''' ) lowercase : Tuple =model(**UpperCAmelCase_ ) # verify the logits lowercase : List[Any] =(1, 1000) self.assertEqual(outputs.logits.shape , UpperCAmelCase_ ) lowercase : str =jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] ) self.assertTrue(jnp.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1e-4 ) )
94
import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = 42 lowercase_ = jnp.floataa lowercase_ = True def SCREAMING_SNAKE_CASE_ (self : Any) ->List[str]: '''simple docstring''' super().setup() lowerCamelCase__: int =nn.Dense(5 , dtype=self.dtype) def __call__(self : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Any) ->Optional[int]: '''simple docstring''' lowerCamelCase__: Optional[Any] =super().__call__(*UpperCAmelCase_ , **UpperCAmelCase_) lowerCamelCase__: int =self.cls(outputs[2]) return outputs[:2] + (cls_out,) class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = FlaxBigBirdForNaturalQuestionsModule def lowerCAmelCase_ ( __a , __a , __a , __a , __a , __a ) -> Tuple: """simple docstring""" def cross_entropy(__a , __a , __a=None ): lowerCamelCase__: Tuple =logits.shape[-1] lowerCamelCase__: Tuple =(labels[..., None] == jnp.arange(__a )[None]).astype("f4" ) lowerCamelCase__: str =jax.nn.log_softmax(__a , axis=-1 ) lowerCamelCase__: Optional[Any] =-jnp.sum(labels * logits , axis=-1 ) if reduction is not None: lowerCamelCase__: Optional[Any] =reduction(__a ) return loss lowerCamelCase__: str =partial(__a , reduction=jnp.mean ) lowerCamelCase__: str =cross_entropy(__a , __a ) lowerCamelCase__: Optional[int] =cross_entropy(__a , __a ) lowerCamelCase__: Optional[Any] =cross_entropy(__a , __a ) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class _SCREAMING_SNAKE_CASE : '''simple docstring''' lowercase_ = "google/bigbird-roberta-base" lowercase_ = 3000 lowercase_ = 1_0500 lowercase_ = 128 lowercase_ = 3 lowercase_ = 1 lowercase_ = 5 # tx_args lowercase_ = 3E-5 lowercase_ = 0.0 lowercase_ = 2_0000 lowercase_ = 0.0095 lowercase_ = "bigbird-roberta-natural-questions" lowercase_ = "training-expt" lowercase_ = "data/nq-training.jsonl" lowercase_ = "data/nq-validation.jsonl" def SCREAMING_SNAKE_CASE_ (self : Tuple) ->List[str]: '''simple docstring''' os.makedirs(self.base_dir , exist_ok=UpperCAmelCase_) lowerCamelCase__: Optional[Any] =os.path.join(self.base_dir , self.save_dir) lowerCamelCase__: List[str] =self.batch_size_per_device * jax.device_count() @dataclass class _SCREAMING_SNAKE_CASE : '''simple docstring''' lowercase_ = 42 lowercase_ = 4096 # no dynamic padding on TPUs def __call__(self : List[Any] , UpperCAmelCase_ : Optional[Any]) ->Optional[int]: '''simple docstring''' lowerCamelCase__: Optional[Any] =self.collate_fn(UpperCAmelCase_) lowerCamelCase__: List[Any] =jax.tree_util.tree_map(UpperCAmelCase_ , UpperCAmelCase_) return batch def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : List[str]) ->List[Any]: '''simple docstring''' lowerCamelCase__ , lowerCamelCase__: List[Any] =self.fetch_inputs(features["input_ids"]) lowerCamelCase__: Union[str, Any] ={ "input_ids": jnp.array(UpperCAmelCase_ , dtype=jnp.intaa), "attention_mask": jnp.array(UpperCAmelCase_ , dtype=jnp.intaa), "start_labels": jnp.array(features["start_token"] , dtype=jnp.intaa), "end_labels": jnp.array(features["end_token"] , dtype=jnp.intaa), "pooled_labels": jnp.array(features["category"] , dtype=jnp.intaa), } return batch def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : list) ->Optional[Any]: '''simple docstring''' lowerCamelCase__: Tuple =[self._fetch_inputs(UpperCAmelCase_) for ids in input_ids] return zip(*UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : list) ->Any: '''simple docstring''' lowerCamelCase__: Optional[Any] =[1 for _ in range(len(UpperCAmelCase_))] while len(UpperCAmelCase_) < self.max_length: input_ids.append(self.pad_id) attention_mask.append(0) return input_ids, attention_mask def lowerCAmelCase_ ( __a , __a , __a=None ) -> str: """simple docstring""" if seed is not None: lowerCamelCase__: Any =dataset.shuffle(seed=__a ) for i in range(len(__a ) // batch_size ): lowerCamelCase__: Any =dataset[i * batch_size : (i + 1) * batch_size] yield dict(__a ) @partial(jax.pmap , axis_name="batch" ) def lowerCAmelCase_ ( __a , __a , **__a ) -> List[str]: """simple docstring""" def loss_fn(__a ): lowerCamelCase__: Optional[int] =model_inputs.pop("start_labels" ) lowerCamelCase__: int =model_inputs.pop("end_labels" ) lowerCamelCase__: List[str] =model_inputs.pop("pooled_labels" ) lowerCamelCase__: Optional[int] =state.apply_fn(**__a , params=__a , dropout_rng=__a , train=__a ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: List[Any] =outputs return state.loss_fn( __a , __a , __a , __a , __a , __a , ) lowerCamelCase__ , lowerCamelCase__: int =jax.random.split(__a ) lowerCamelCase__: Optional[Any] =jax.value_and_grad(__a ) lowerCamelCase__ , lowerCamelCase__: List[str] =grad_fn(state.params ) lowerCamelCase__: Optional[Any] =jax.lax.pmean({"loss": loss} , axis_name="batch" ) lowerCamelCase__: List[str] =jax.lax.pmean(__a , "batch" ) lowerCamelCase__: List[str] =state.apply_gradients(grads=__a ) return state, metrics, new_drp_rng @partial(jax.pmap , axis_name="batch" ) def lowerCAmelCase_ ( __a , **__a ) -> List[Any]: """simple docstring""" lowerCamelCase__: int =model_inputs.pop("start_labels" ) lowerCamelCase__: List[str] =model_inputs.pop("end_labels" ) lowerCamelCase__: int =model_inputs.pop("pooled_labels" ) lowerCamelCase__: Optional[Any] =state.apply_fn(**__a , params=state.params , train=__a ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: List[str] =outputs lowerCamelCase__: Optional[int] =state.loss_fn(__a , __a , __a , __a , __a , __a ) lowerCamelCase__: Optional[Any] =jax.lax.pmean({"loss": loss} , axis_name="batch" ) return metrics class _SCREAMING_SNAKE_CASE ( train_state.TrainState ): '''simple docstring''' lowercase_ = struct.field(pytree_node=__SCREAMING_SNAKE_CASE ) @dataclass class _SCREAMING_SNAKE_CASE : '''simple docstring''' lowercase_ = 42 lowercase_ = 42 lowercase_ = 42 lowercase_ = 42 lowercase_ = 42 lowercase_ = 42 lowercase_ = None def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int=None) ->Optional[int]: '''simple docstring''' lowerCamelCase__: Dict =model.params lowerCamelCase__: Tuple =TrainState.create( apply_fn=model.__call__ , params=UpperCAmelCase_ , tx=UpperCAmelCase_ , loss_fn=UpperCAmelCase_ , ) if ckpt_dir is not None: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Any =restore_checkpoint(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: Tuple ={ "lr": args.lr, "init_lr": args.init_lr, "warmup_steps": args.warmup_steps, "num_train_steps": num_train_steps, "weight_decay": args.weight_decay, } lowerCamelCase__ , lowerCamelCase__: List[Any] =build_tx(**UpperCAmelCase_) lowerCamelCase__: str =train_state.TrainState( step=UpperCAmelCase_ , apply_fn=model.__call__ , params=UpperCAmelCase_ , tx=UpperCAmelCase_ , opt_state=UpperCAmelCase_ , ) lowerCamelCase__: Tuple =args lowerCamelCase__: Tuple =data_collator lowerCamelCase__: str =lr lowerCamelCase__: Dict =params lowerCamelCase__: List[str] =jax_utils.replicate(UpperCAmelCase_) return state def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple) ->Optional[Any]: '''simple docstring''' lowerCamelCase__: Tuple =self.args lowerCamelCase__: Any =len(UpperCAmelCase_) // args.batch_size lowerCamelCase__: List[str] =jax.random.PRNGKey(0) lowerCamelCase__: Optional[Any] =jax.random.split(UpperCAmelCase_ , jax.device_count()) for epoch in range(args.max_epochs): lowerCamelCase__: Union[str, Any] =jnp.array(0 , dtype=jnp.floataa) lowerCamelCase__: str =get_batched_dataset(UpperCAmelCase_ , args.batch_size , seed=UpperCAmelCase_) lowerCamelCase__: Dict =0 for batch in tqdm(UpperCAmelCase_ , total=UpperCAmelCase_ , desc=F"""Running EPOCH-{epoch}"""): lowerCamelCase__: List[str] =self.data_collator(UpperCAmelCase_) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[int] =self.train_step_fn(UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_) running_loss += jax_utils.unreplicate(metrics["loss"]) i += 1 if i % args.logging_steps == 0: lowerCamelCase__: Optional[int] =jax_utils.unreplicate(state.step) lowerCamelCase__: List[Any] =running_loss.item() / i lowerCamelCase__: Tuple =self.scheduler_fn(state_step - 1) lowerCamelCase__: Union[str, Any] =self.evaluate(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: Dict ={ "step": state_step.item(), "eval_loss": eval_loss.item(), "tr_loss": tr_loss, "lr": lr.item(), } tqdm.write(str(UpperCAmelCase_)) self.logger.log(UpperCAmelCase_ , commit=UpperCAmelCase_) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + F"""-e{epoch}-s{i}""" , state=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : str) ->Any: '''simple docstring''' lowerCamelCase__: List[Any] =get_batched_dataset(UpperCAmelCase_ , self.args.batch_size) lowerCamelCase__: List[str] =len(UpperCAmelCase_) // self.args.batch_size lowerCamelCase__: str =jnp.array(0 , dtype=jnp.floataa) lowerCamelCase__: Optional[Any] =0 for batch in tqdm(UpperCAmelCase_ , total=UpperCAmelCase_ , desc="Evaluating ... "): lowerCamelCase__: int =self.data_collator(UpperCAmelCase_) lowerCamelCase__: str =self.val_step_fn(UpperCAmelCase_ , **UpperCAmelCase_) running_loss += jax_utils.unreplicate(metrics["loss"]) i += 1 return running_loss / i def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int]) ->int: '''simple docstring''' lowerCamelCase__: Any =jax_utils.unreplicate(UpperCAmelCase_) print(F"""SAVING CHECKPOINT IN {save_dir}""" , end=" ... ") self.model_save_fn(UpperCAmelCase_ , params=state.params) with open(os.path.join(UpperCAmelCase_ , "opt_state.msgpack") , "wb") as f: f.write(to_bytes(state.opt_state)) joblib.dump(self.args , os.path.join(UpperCAmelCase_ , "args.joblib")) joblib.dump(self.data_collator , os.path.join(UpperCAmelCase_ , "data_collator.joblib")) with open(os.path.join(UpperCAmelCase_ , "training_state.json") , "w") as f: json.dump({"step": state.step.item()} , UpperCAmelCase_) print("DONE") def lowerCAmelCase_ ( __a , __a ) -> str: """simple docstring""" print(F"""RESTORING CHECKPOINT FROM {save_dir}""" , end=" ... " ) with open(os.path.join(__a , "flax_model.msgpack" ) , "rb" ) as f: lowerCamelCase__: Tuple =from_bytes(state.params , f.read() ) with open(os.path.join(__a , "opt_state.msgpack" ) , "rb" ) as f: lowerCamelCase__: Optional[int] =from_bytes(state.opt_state , f.read() ) lowerCamelCase__: Any =joblib.load(os.path.join(__a , "args.joblib" ) ) lowerCamelCase__: Union[str, Any] =joblib.load(os.path.join(__a , "data_collator.joblib" ) ) with open(os.path.join(__a , "training_state.json" ) , "r" ) as f: lowerCamelCase__: Optional[Any] =json.load(__a ) lowerCamelCase__: Any =training_state["step"] print("DONE" ) return params, opt_state, step, args, data_collator def lowerCAmelCase_ ( __a , __a , __a , __a ) -> Optional[int]: """simple docstring""" lowerCamelCase__: int =num_train_steps - warmup_steps lowerCamelCase__: str =optax.linear_schedule(init_value=__a , end_value=__a , transition_steps=__a ) lowerCamelCase__: Optional[Any] =optax.linear_schedule(init_value=__a , end_value=1e-7 , transition_steps=__a ) lowerCamelCase__: List[Any] =optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] ) return lr def lowerCAmelCase_ ( __a , __a , __a , __a , __a ) -> str: """simple docstring""" def weight_decay_mask(__a ): lowerCamelCase__: List[str] =traverse_util.flatten_dict(__a ) lowerCamelCase__: List[str] ={k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()} return traverse_util.unflatten_dict(__a ) lowerCamelCase__: Optional[Any] =scheduler_fn(__a , __a , __a , __a ) lowerCamelCase__: Tuple =optax.adamw(learning_rate=__a , weight_decay=__a , mask=__a ) return tx, lr
59
0
"""simple docstring""" from datetime import datetime import matplotlib.pyplot as plt import torch def UpperCAmelCase__ (snake_case__ : Dict ): """simple docstring""" for param in module.parameters(): _snake_case : Tuple = False def UpperCAmelCase__ (): """simple docstring""" _snake_case : List[str] = "cuda" if torch.cuda.is_available() else "cpu" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): _snake_case : str = "mps" if device == "mps": print( """WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch""" """ errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues""" """ with generations.""" ) return device def UpperCAmelCase__ (snake_case__ : Any ): """simple docstring""" _snake_case : Union[str, Any] = plt.imshow(__a ) fig.axes.get_xaxis().set_visible(__a ) fig.axes.get_yaxis().set_visible(__a ) plt.show() def UpperCAmelCase__ (): """simple docstring""" _snake_case : List[str] = datetime.now() _snake_case : str = current_time.strftime("""%H:%M:%S""" ) return timestamp
609
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = ["image_processor", "tokenizer"] lowercase_ = "ChineseCLIPImageProcessor" lowercase_ = ("BertTokenizer", "BertTokenizerFast") def __init__(self : Any , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Union[str, Any]=None , **UpperCAmelCase_ : str) ->Dict: '''simple docstring''' lowerCamelCase__: str =None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , UpperCAmelCase_ , ) lowerCamelCase__: Tuple =kwargs.pop("feature_extractor") lowerCamelCase__: Optional[int] =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") super().__init__(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: Optional[int] =self.image_processor def __call__(self : int , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : List[Any]=None , **UpperCAmelCase_ : Dict) ->Optional[int]: '''simple docstring''' if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be none.") if text is not None: lowerCamelCase__: Dict =self.tokenizer(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_) if images is not None: lowerCamelCase__: List[str] =self.image_processor(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_) if text is not None and images is not None: lowerCamelCase__: Union[str, Any] =image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCAmelCase_) , tensor_type=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int) ->str: '''simple docstring''' return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Any , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[Any]) ->Dict: '''simple docstring''' return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_) @property def SCREAMING_SNAKE_CASE_ (self : int) ->List[str]: '''simple docstring''' lowerCamelCase__: str =self.tokenizer.model_input_names lowerCamelCase__: Union[str, Any] =self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) @property def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->str: '''simple docstring''' warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCAmelCase_ , ) return self.image_processor_class
59
0
from maths.prime_check import is_prime def _snake_case ( lowerCAmelCase : int ): """simple docstring""" if not isinstance(__a , __a ): SCREAMING_SNAKE_CASE_ : Any = f'Input value of [number={number}] must be an integer' raise TypeError(__a ) if is_prime(__a ) and is_prime(number + 2 ): return number + 2 else: return -1 if __name__ == "__main__": import doctest doctest.testmod()
216
from math import ceil, sqrt def lowerCAmelCase_ ( __a = 1000000 ) -> int: """simple docstring""" lowerCamelCase__: Any =0 for outer_width in range(3 , (limit // 4) + 2 ): if outer_width**2 > limit: lowerCamelCase__: Optional[int] =max(ceil(sqrt(outer_width**2 - limit ) ) , 1 ) else: lowerCamelCase__: Tuple =1 if (outer_width - hole_width_lower_bound) % 2: hole_width_lower_bound += 1 answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1 return answer if __name__ == "__main__": print(f'{solution() = }')
59
0
"""simple docstring""" import unittest from transformers import load_tool from transformers.utils import is_torch_available if is_torch_available(): import torch from transformers.testing_utils import require_torch from .test_tools_common import ToolTesterMixin @require_torch class a__ ( unittest.TestCase , __SCREAMING_SNAKE_CASE ): def a_ ( self : int): """simple docstring""" __UpperCAmelCase : Any = load_tool("text-to-speech") self.tool.setup() def a_ ( self : Tuple): """simple docstring""" torch.manual_seed(0) __UpperCAmelCase : Optional[Any] = self.tool("hey") __UpperCAmelCase : Any = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]) , )) def a_ ( self : int): """simple docstring""" torch.manual_seed(0) __UpperCAmelCase : Tuple = self.tool("hey") __UpperCAmelCase : int = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]) , ))
77
def lowerCAmelCase_ ( __a = 50000000 ) -> int: """simple docstring""" lowerCamelCase__: Any =set() lowerCamelCase__: int =int((limit - 24) ** (1 / 2) ) lowerCamelCase__: Tuple =set(range(3 , prime_square_limit + 1 , 2 ) ) primes.add(2 ) for p in range(3 , prime_square_limit + 1 , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , prime_square_limit + 1 , __a ) ) ) for primea in primes: lowerCamelCase__: Optional[int] =primea * primea for primea in primes: lowerCamelCase__: List[str] =primea * primea * primea if square + cube >= limit - 16: break for primea in primes: lowerCamelCase__: int =primea * primea * primea * primea lowerCamelCase__: Optional[Any] =square + cube + tetr if total >= limit: break ret.add(__a ) return len(__a ) if __name__ == "__main__": print(f'{solution() = }')
59
0
"""simple docstring""" import numpy as np from numpy import ndarray from scipy.optimize import Bounds, LinearConstraint, minimize def _snake_case ( lowercase__ ): return np.dot(__a , __a ) class lowerCAmelCase__ : '''simple docstring''' def __init__( self , *, lowercase = np.inf , lowercase = "linear" , lowercase = 0.0 , ): _lowerCamelCase : Dict = regularization _lowerCamelCase : Any = gamma if kernel == "linear": _lowerCamelCase : Dict = self.__linear elif kernel == "rbf": if self.gamma == 0: raise ValueError('rbf kernel requires gamma' ) if not isinstance(self.gamma , (float, int) ): raise ValueError('gamma must be float or int' ) if not self.gamma > 0: raise ValueError('gamma must be > 0' ) _lowerCamelCase : Tuple = self.__rbf # in the future, there could be a default value like in sklearn # sklear: def_gamma = 1/(n_features * X.var()) (wiki) # previously it was 1/(n_features) else: _lowerCamelCase : Optional[Any] = F'''Unknown kernel: {kernel}''' raise ValueError(UpperCAmelCase_ ) def A_ ( self , lowercase , lowercase ): return np.dot(UpperCAmelCase_ , UpperCAmelCase_ ) def A_ ( self , lowercase , lowercase ): return np.exp(-(self.gamma * norm_squared(vectora - vectora )) ) def A_ ( self , lowercase , lowercase ): _lowerCamelCase : Optional[Any] = observations _lowerCamelCase : Optional[int] = classes # using Wolfe's Dual to calculate w. # Primal problem: minimize 1/2*norm_squared(w) # constraint: yn(w . xn + b) >= 1 # # With l a vector # Dual problem: maximize sum_n(ln) - # 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm)) # constraint: self.C >= ln >= 0 # and sum_n(ln*yn) = 0 # Then we get w using w = sum_n(ln*yn*xn) # At the end we can get b ~= mean(yn - w . xn) # # Since we use kernels, we only need l_star to calculate b # and to classify observations (_lowerCamelCase ) : List[str] = np.shape(UpperCAmelCase_ ) def to_minimize(lowercase ) -> float: _lowerCamelCase : int = 0 (_lowerCamelCase ) : Optional[Any] = np.shape(UpperCAmelCase_ ) for i in range(UpperCAmelCase_ ): for j in range(UpperCAmelCase_ ): s += ( candidate[i] * candidate[j] * classes[i] * classes[j] * self.kernel(observations[i] , observations[j] ) ) return 1 / 2 * s - sum(UpperCAmelCase_ ) _lowerCamelCase : List[Any] = LinearConstraint(UpperCAmelCase_ , 0 , 0 ) _lowerCamelCase : str = Bounds(0 , self.regularization ) _lowerCamelCase : Union[str, Any] = minimize( UpperCAmelCase_ , np.ones(UpperCAmelCase_ ) , bounds=UpperCAmelCase_ , constraints=[ly_contraint] ).x _lowerCamelCase : str = l_star # calculating mean offset of separation plane to points _lowerCamelCase : Tuple = 0 for i in range(UpperCAmelCase_ ): for j in range(UpperCAmelCase_ ): s += classes[i] - classes[i] * self.optimum[i] * self.kernel( observations[i] , observations[j] ) _lowerCamelCase : int = s / n def A_ ( self , lowercase ): _lowerCamelCase : Optional[Any] = sum( self.optimum[n] * self.classes[n] * self.kernel(self.observations[n] , UpperCAmelCase_ ) for n in range(len(self.classes ) ) ) return 1 if s + self.offset >= 0 else -1 if __name__ == "__main__": import doctest doctest.testmod()
630
from __future__ import annotations from decimal import Decimal from math import * # noqa: F403 from sympy import diff def lowerCAmelCase_ ( __a , __a , __a = 10**-10 ) -> float: """simple docstring""" lowerCamelCase__: List[str] =a while True: lowerCamelCase__: Optional[Any] =Decimal(__a ) - ( Decimal(eval(__a ) ) / Decimal(eval(str(diff(__a ) ) ) ) # noqa: S307 ) # This number dictates the accuracy of the answer if abs(eval(__a ) ) < precision: # noqa: S307 return float(__a ) # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}') # Find root of polynomial print(f'The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}') # Find Square Root of 5 print(f'The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}') # Exponential Roots print(f'The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}')
59
0
from math import pow, sqrt def lowerCamelCase ( *a_ ) -> bool: lowerCAmelCase_ = len(__a ) > 0 and all(value > 0.0 for value in values ) return result def lowerCamelCase ( a_ , a_ ) -> float | ValueError: return ( round(sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(__a , __a ) else ValueError('Input Error: Molar mass values must greater than 0.' ) ) def lowerCamelCase ( a_ , a_ , a_ ) -> float | ValueError: return ( round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(__a , __a , __a ) else ValueError( 'Input Error: Molar mass and effusion rate values must greater than 0.' ) ) def lowerCamelCase ( a_ , a_ , a_ ) -> float | ValueError: return ( round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(__a , __a , __a ) else ValueError( 'Input Error: Molar mass and effusion rate values must greater than 0.' ) ) def lowerCamelCase ( a_ , a_ , a_ ) -> float | ValueError: return ( round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 ) if validate(__a , __a , __a ) else ValueError( 'Input Error: Molar mass and effusion rate values must greater than 0.' ) ) def lowerCamelCase ( a_ , a_ , a_ ) -> float | ValueError: return ( round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 ) if validate(__a , __a , __a ) else ValueError( 'Input Error: Molar mass and effusion rate values must greater than 0.' ) )
318
import numpy as np from numpy import ndarray from scipy.optimize import Bounds, LinearConstraint, minimize def lowerCAmelCase_ ( __a ) -> float: """simple docstring""" return np.dot(__a , __a ) class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__(self : List[str] , *, UpperCAmelCase_ : float = np.inf , UpperCAmelCase_ : str = "linear" , UpperCAmelCase_ : float = 0.0 , ) ->None: '''simple docstring''' lowerCamelCase__: Dict =regularization lowerCamelCase__: Any =gamma if kernel == "linear": lowerCamelCase__: Dict =self.__linear elif kernel == "rbf": if self.gamma == 0: raise ValueError("rbf kernel requires gamma") if not isinstance(self.gamma , (float, int)): raise ValueError("gamma must be float or int") if not self.gamma > 0: raise ValueError("gamma must be > 0") lowerCamelCase__: Tuple =self.__rbf # in the future, there could be a default value like in sklearn # sklear: def_gamma = 1/(n_features * X.var()) (wiki) # previously it was 1/(n_features) else: lowerCamelCase__: Optional[Any] =F"""Unknown kernel: {kernel}""" raise ValueError(UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : ndarray , UpperCAmelCase_ : ndarray) ->float: '''simple docstring''' return np.dot(UpperCAmelCase_ , UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : ndarray , UpperCAmelCase_ : ndarray) ->float: '''simple docstring''' return np.exp(-(self.gamma * norm_squared(vectora - vectora))) def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : list[ndarray] , UpperCAmelCase_ : ndarray) ->None: '''simple docstring''' lowerCamelCase__: Optional[Any] =observations lowerCamelCase__: Optional[int] =classes # using Wolfe's Dual to calculate w. # Primal problem: minimize 1/2*norm_squared(w) # constraint: yn(w . xn + b) >= 1 # # With l a vector # Dual problem: maximize sum_n(ln) - # 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm)) # constraint: self.C >= ln >= 0 # and sum_n(ln*yn) = 0 # Then we get w using w = sum_n(ln*yn*xn) # At the end we can get b ~= mean(yn - w . xn) # # Since we use kernels, we only need l_star to calculate b # and to classify observations ((lowerCamelCase__) , ): List[str] =np.shape(UpperCAmelCase_) def to_minimize(UpperCAmelCase_ : ndarray) -> float: lowerCamelCase__: int =0 ((lowerCamelCase__) , ): Optional[Any] =np.shape(UpperCAmelCase_) for i in range(UpperCAmelCase_): for j in range(UpperCAmelCase_): s += ( candidate[i] * candidate[j] * classes[i] * classes[j] * self.kernel(observations[i] , observations[j]) ) return 1 / 2 * s - sum(UpperCAmelCase_) lowerCamelCase__: List[Any] =LinearConstraint(UpperCAmelCase_ , 0 , 0) lowerCamelCase__: str =Bounds(0 , self.regularization) lowerCamelCase__: Union[str, Any] =minimize( UpperCAmelCase_ , np.ones(UpperCAmelCase_) , bounds=UpperCAmelCase_ , constraints=[ly_contraint]).x lowerCamelCase__: str =l_star # calculating mean offset of separation plane to points lowerCamelCase__: Tuple =0 for i in range(UpperCAmelCase_): for j in range(UpperCAmelCase_): s += classes[i] - classes[i] * self.optimum[i] * self.kernel( observations[i] , observations[j]) lowerCamelCase__: int =s / n def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : ndarray) ->int: '''simple docstring''' lowerCamelCase__: Optional[Any] =sum( self.optimum[n] * self.classes[n] * self.kernel(self.observations[n] , UpperCAmelCase_) for n in range(len(self.classes))) return 1 if s + self.offset >= 0 else -1 if __name__ == "__main__": import doctest doctest.testmod()
59
0
"""simple docstring""" import numpy as np import skfuzzy as fuzz if __name__ == "__main__": # Create universe of discourse in Python using linspace () __A : List[Any] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False) # Create two fuzzy sets by defining any membership function # (trapmf(), gbellmf(), gaussmf(), etc). __A : Union[str, Any] = [0, 25, 50] __A : List[str] = [25, 50, 75] __A : int = fuzz.membership.trimf(X, abca) __A : List[Any] = fuzz.membership.trimf(X, abca) # Compute the different operations using inbuilt functions. __A : Dict = np.ones(75) __A : str = np.zeros((75,)) # 1. Union = max(µA(x), µB(x)) __A : str = fuzz.fuzzy_or(X, young, X, middle_aged)[1] # 2. Intersection = min(µA(x), µB(x)) __A : Dict = fuzz.fuzzy_and(X, young, X, middle_aged)[1] # 3. Complement (A) = (1- min(µA(x)) __A : List[str] = fuzz.fuzzy_not(young) # 4. Difference (A/B) = min(µA(x),(1- µB(x))) __A : Dict = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1] # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))] __A : Dict = young + middle_aged - (young * middle_aged) # 6. Algebraic Product = (µA(x) * µB(x)) __A : Optional[int] = young * middle_aged # 7. Bounded Sum = min[1,(µA(x), µB(x))] __A : Any = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1] # 8. Bounded difference = min[0,(µA(x), µB(x))] __A : str = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1] # max-min composition # max-product composition # Plot each set A, set B and each operation result using plot() and subplot(). from matplotlib import pyplot as plt plt.figure() plt.subplot(4, 3, 1) plt.plot(X, young) plt.title('''Young''') plt.grid(True) plt.subplot(4, 3, 2) plt.plot(X, middle_aged) plt.title('''Middle aged''') plt.grid(True) plt.subplot(4, 3, 3) plt.plot(X, union) plt.title('''union''') plt.grid(True) plt.subplot(4, 3, 4) plt.plot(X, intersection) plt.title('''intersection''') plt.grid(True) plt.subplot(4, 3, 5) plt.plot(X, complement_a) plt.title('''complement_a''') plt.grid(True) plt.subplot(4, 3, 6) plt.plot(X, difference) plt.title('''difference a/b''') plt.grid(True) plt.subplot(4, 3, 7) plt.plot(X, alg_sum) plt.title('''alg_sum''') plt.grid(True) plt.subplot(4, 3, 8) plt.plot(X, alg_product) plt.title('''alg_product''') plt.grid(True) plt.subplot(4, 3, 9) plt.plot(X, bdd_sum) plt.title('''bdd_sum''') plt.grid(True) plt.subplot(4, 3, 10) plt.plot(X, bdd_difference) plt.title('''bdd_difference''') plt.grid(True) plt.subplots_adjust(hspace=0.5) plt.show()
499
import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy __A = logging.getLogger(__name__) def lowerCAmelCase_ ( __a , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = False , ) -> str: """simple docstring""" lowerCamelCase__: int =bnb_quantization_config.load_in_abit lowerCamelCase__: Any =bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( "You have a version of `bitsandbytes` that is not compatible with 8bit quantization," " make sure you have the latest version of `bitsandbytes` installed." ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( "You have a version of `bitsandbytes` that is not compatible with 4bit quantization," "make sure you have the latest version of `bitsandbytes` installed." ) lowerCamelCase__: List[Any] =[] # custom device map if isinstance(__a , __a ) and len(device_map.keys() ) > 1: lowerCamelCase__: Optional[int] =[key for key, value in device_map.items() if value in ["disk", "cpu"]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: lowerCamelCase__: Any =get_keys_to_not_convert(__a ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(__a ) lowerCamelCase__: List[str] =bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: lowerCamelCase__: List[Any] =[] lowerCamelCase__: int =bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(__a ) # compatibility with peft lowerCamelCase__: List[str] =load_in_abit lowerCamelCase__: int =load_in_abit lowerCamelCase__: Tuple =get_parameter_device(__a ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( "It is not recommended to quantize a loaded model. " "The model should be instantiated under the `init_empty_weights` context manager." ) lowerCamelCase__: Tuple =replace_with_bnb_layers(__a , __a , modules_to_not_convert=__a ) # convert param to the right dtype lowerCamelCase__: Dict =bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: lowerCamelCase__: str =name.replace(".weight" , "" ).replace(".bias" , "" ) lowerCamelCase__: Optional[Any] =getattr(__a , __a , __a ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(__a ): param.to(__a ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError("No GPU found. A GPU is needed for quantization." ) logger.info( F"""The model device type is {model_device.type}. However, cuda is needed for quantization.""" "We move the model to cuda." ) return model elif weights_location is None: raise RuntimeError( F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ ) else: with init_empty_weights(): lowerCamelCase__: str =replace_with_bnb_layers( __a , __a , modules_to_not_convert=__a ) lowerCamelCase__: Optional[Any] =get_quantized_model_device_map( __a , __a , __a , max_memory=__a , no_split_module_classes=__a , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): lowerCamelCase__: Any =True lowerCamelCase__: List[str] =any(x in list(device_map.values() ) for x in ["cpu", "disk"] ) load_checkpoint_in_model( __a , __a , __a , dtype=bnb_quantization_config.torch_dtype , offload_folder=__a , offload_state_dict=__a , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(__a , device_map=__a , offload_dir=__a ) def lowerCAmelCase_ ( __a , __a , __a=None , __a=None , __a=None ) -> str: """simple docstring""" if device_map is None: if torch.cuda.is_available(): lowerCamelCase__: str ={"": torch.cuda.current_device()} else: raise RuntimeError("No GPU found. A GPU is needed for quantization." ) logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`." ) if isinstance(__a , __a ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or " "'sequential'." ) lowerCamelCase__: Optional[int] ={} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) lowerCamelCase__: Optional[Any] ={} lowerCamelCase__: str =special_dtypes lowerCamelCase__: List[str] =no_split_module_classes lowerCamelCase__: Dict =bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": lowerCamelCase__: Optional[Any] =get_balanced_memory( __a , low_zero=(device_map == "balanced_low_0") , max_memory=__a , **__a , ) lowerCamelCase__: Union[str, Any] =max_memory lowerCamelCase__: Dict =infer_auto_device_map(__a , **__a ) if isinstance(__a , __a ): # check if don't have any quantized module on the cpu lowerCamelCase__: Union[str, Any] =bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules lowerCamelCase__: List[Any] ={ key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( "\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n " ) else: logger.info( "Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit" ) del device_map_without_some_modules return device_map def lowerCAmelCase_ ( __a , __a , __a=None , __a=None ) -> Optional[Any]: """simple docstring""" if modules_to_not_convert is None: lowerCamelCase__: List[Any] =[] lowerCamelCase__ , lowerCamelCase__: Any =_replace_with_bnb_layers( __a , __a , __a , __a ) if not has_been_replaced: logger.warning( "You are loading your model in 8bit or 4bit but no linear modules were found in your model." " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers." " Please double check your model architecture, or submit an issue on github if you think this is" " a bug." ) return model def lowerCAmelCase_ ( __a , __a , __a=None , __a=None , ) -> List[Any]: """simple docstring""" lowerCamelCase__: Optional[int] =False for name, module in model.named_children(): if current_key_name is None: lowerCamelCase__: Optional[Any] =[] current_key_name.append(__a ) if isinstance(__a , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` lowerCamelCase__: List[str] =".".join(__a ) lowerCamelCase__: Optional[Any] =True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: lowerCamelCase__: int =False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: lowerCamelCase__: Optional[int] =bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=__a , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: lowerCamelCase__: Dict =bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError("load_in_8bit and load_in_4bit can't be both False" ) lowerCamelCase__: Dict =module.weight.data if module.bias is not None: lowerCamelCase__: List[Any] =module.bias.data bnb_module.requires_grad_(__a ) setattr(__a , __a , __a ) lowerCamelCase__: int =True if len(list(module.children() ) ) > 0: lowerCamelCase__ , lowerCamelCase__: List[str] =_replace_with_bnb_layers( __a , __a , __a , __a ) lowerCamelCase__: Union[str, Any] =has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def lowerCAmelCase_ ( __a ) -> List[Any]: """simple docstring""" with init_empty_weights(): lowerCamelCase__: Any =deepcopy(__a ) # this has 0 cost since it is done inside `init_empty_weights` context manager` lowerCamelCase__: str =find_tied_parameters(__a ) # For compatibility with Accelerate < 0.18 if isinstance(__a , __a ): lowerCamelCase__: int =sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: lowerCamelCase__: str =sum(__a , [] ) lowerCamelCase__: str =len(__a ) > 0 # Check if it is a base model lowerCamelCase__: Optional[Any] =False if hasattr(__a , "base_model_prefix" ): lowerCamelCase__: Union[str, Any] =not hasattr(__a , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head lowerCamelCase__: Optional[int] =list(model.named_children() ) lowerCamelCase__: Optional[int] =[list_modules[-1][0]] # add last module together with tied weights lowerCamelCase__: Union[str, Any] =set(__a ) - set(__a ) lowerCamelCase__: List[str] =list(set(__a ) ) + list(__a ) # remove ".weight" from the keys lowerCamelCase__: List[Any] =[".weight", ".bias"] lowerCamelCase__: Tuple =[] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: lowerCamelCase__: Optional[Any] =name.replace(__a , "" ) filtered_module_names.append(__a ) return filtered_module_names def lowerCAmelCase_ ( __a ) -> Tuple: """simple docstring""" for m in model.modules(): if isinstance(__a , bnb.nn.Linearabit ): return True return False def lowerCAmelCase_ ( __a ) -> List[str]: """simple docstring""" return next(parameter.parameters() ).device def lowerCAmelCase_ ( __a , __a , __a , __a , __a , __a , __a ) -> Any: """simple docstring""" if fpaa_statistics is None: set_module_tensor_to_device(__a , __a , 0 , dtype=__a , value=__a ) lowerCamelCase__: Dict =param_name lowerCamelCase__: Tuple =model if "." in tensor_name: lowerCamelCase__: Any =tensor_name.split("." ) for split in splits[:-1]: lowerCamelCase__: Any =getattr(__a , __a ) if new_module is None: raise ValueError(F"""{module} has no attribute {split}.""" ) lowerCamelCase__: str =new_module lowerCamelCase__: int =splits[-1] # offload weights lowerCamelCase__: str =False offload_weight(module._parameters[tensor_name] , __a , __a , index=__a ) if hasattr(module._parameters[tensor_name] , "SCB" ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace("weight" , "SCB" ) , __a , index=__a , ) else: offload_weight(__a , __a , __a , index=__a ) offload_weight(__a , param_name.replace("weight" , "SCB" ) , __a , index=__a ) set_module_tensor_to_device(__a , __a , "meta" , dtype=__a , value=torch.empty(*param.size() ) )
59
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCamelCase : str = logging.get_logger(__name__) __UpperCamelCase : Tuple = { '''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''', } class a ( __SCREAMING_SNAKE_CASE ): snake_case__ = '''roc_bert''' def __init__( self , _snake_case=3_05_22 , _snake_case=7_68 , _snake_case=12 , _snake_case=12 , _snake_case=30_72 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=2 , _snake_case=0.02 , _snake_case=1E-12 , _snake_case=True , _snake_case=0 , _snake_case="absolute" , _snake_case=None , _snake_case=True , _snake_case=True , _snake_case=7_68 , _snake_case=9_10 , _snake_case=5_12 , _snake_case=2_48_58 , _snake_case=True , **_snake_case , ): """simple docstring""" lowerCAmelCase = vocab_size lowerCAmelCase = max_position_embeddings lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = initializer_range lowerCAmelCase = type_vocab_size lowerCAmelCase = layer_norm_eps lowerCAmelCase = use_cache lowerCAmelCase = enable_pronunciation lowerCAmelCase = enable_shape lowerCAmelCase = pronunciation_embed_dim lowerCAmelCase = pronunciation_vocab_size lowerCAmelCase = shape_embed_dim lowerCAmelCase = shape_vocab_size lowerCAmelCase = concat_input lowerCAmelCase = position_embedding_type lowerCAmelCase = classifier_dropout super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_ )
4
from __future__ import annotations from math import pi def lowerCAmelCase_ ( __a , __a , __a ) -> dict[str, float]: """simple docstring""" if (inductance, frequency, reactance).count(0 ) != 1: raise ValueError("One and only one argument must be 0" ) if inductance < 0: raise ValueError("Inductance cannot be negative" ) if frequency < 0: raise ValueError("Frequency cannot be negative" ) if reactance < 0: raise ValueError("Inductive reactance cannot be negative" ) if inductance == 0: return {"inductance": reactance / (2 * pi * frequency)} elif frequency == 0: return {"frequency": reactance / (2 * pi * inductance)} elif reactance == 0: return {"reactance": 2 * pi * frequency * inductance} else: raise ValueError("Exactly one argument must be 0" ) if __name__ == "__main__": import doctest doctest.testmod()
59
0
import logging import torch from accelerate import Accelerator from arguments import EvaluationArguments from datasets import load_dataset from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): def __init__( self , lowercase , lowercase , lowercase=1024 , lowercase=1024 , lowercase=3.6 ) -> Dict: lowerCamelCase_ = tokenizer lowerCamelCase_ = tokenizer.bos_token_id lowerCamelCase_ = dataset lowerCamelCase_ = seq_length lowerCamelCase_ = seq_length * chars_per_token * num_of_sequences def __iter__( self ) -> int: lowerCamelCase_ = iter(self.dataset ) lowerCamelCase_ = True while more_examples: lowerCamelCase_ = [], 0 while True: if buffer_len >= self.input_characters: break try: buffer.append(next(UpperCAmelCase_ )["content"] ) buffer_len += len(buffer[-1] ) except StopIteration: lowerCamelCase_ = False break lowerCamelCase_ = tokenizer(UpperCAmelCase_ , truncation=UpperCAmelCase_ )["input_ids"] lowerCamelCase_ = [] for tokenized_input in tokenized_inputs: all_token_ids.extend(tokenized_input + [self.concat_token_id] ) for i in range(0 , len(UpperCAmelCase_ ) , self.seq_length ): lowerCamelCase_ = all_token_ids[i : i + self.seq_length] if len(UpperCAmelCase_ ) == self.seq_length: yield torch.tensor(UpperCAmelCase_ ) def lowerCamelCase_ ( lowerCamelCase__ ): lowerCamelCase_ = {"streaming": True} lowerCamelCase_ = load_dataset(args.dataset_name , split="train" , **__a ) lowerCamelCase_ = ConstantLengthDataset(__a , __a , seq_length=args.seq_length ) lowerCamelCase_ = DataLoader(__a , batch_size=args.batch_size ) return eval_dataloader def lowerCamelCase_ ( lowerCamelCase__ ): model.eval() lowerCamelCase_ = [] for step, batch in enumerate(__a ): with torch.no_grad(): lowerCamelCase_ = model(__a , labels=__a ) lowerCamelCase_ = outputs.loss.repeat(args.batch_size ) losses.append(accelerator.gather(__a ) ) if args.max_eval_steps > 0 and step >= args.max_eval_steps: break lowerCamelCase_ = torch.mean(torch.cat(__a ) ) try: lowerCamelCase_ = torch.exp(__a ) except OverflowError: lowerCamelCase_ = float("inf" ) return loss.item(), perplexity.item() # Setup Accelerator __A =Accelerator() # Parse configuration __A =HfArgumentParser(EvaluationArguments) __A =parser.parse_args() set_seed(args.seed) # Logging __A =logging.getLogger(__name__) logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) # Load model and tokenizer __A =AutoModelForCausalLM.from_pretrained(args.model_ckpt) __A =AutoTokenizer.from_pretrained(args.model_ckpt) # Load dataset and dataloader __A =create_dataloader(args) # Prepare everything with our `accelerator`. __A, __A =accelerator.prepare(model, eval_dataloader) # Evaluate and save the last checkpoint logger.info('''Evaluating and saving model after training''') __A, __A =evaluate(args) logger.info(F"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
463
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCAmelCase_ ( __a , __a ) -> List[Any]: """simple docstring""" assert isinstance(__a , __a ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowerCAmelCase_ ( __a , __a , __a ) -> Any: """simple docstring""" lowerCamelCase__: Any =tmp_path / "cache" lowerCamelCase__: Optional[int] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCamelCase__: Tuple =ParquetDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read() _check_parquet_dataset(__a , __a ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowerCAmelCase_ ( __a , __a , __a ) -> List[str]: """simple docstring""" lowerCamelCase__: int =tmp_path / "cache" lowerCamelCase__: Tuple ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: Union[str, Any] =features.copy() if features else default_expected_features lowerCamelCase__: Optional[int] =( Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCamelCase__: int =ParquetDatasetReader(__a , features=__a , cache_dir=__a ).read() _check_parquet_dataset(__a , __a ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowerCAmelCase_ ( __a , __a , __a ) -> Any: """simple docstring""" lowerCamelCase__: Any =tmp_path / "cache" lowerCamelCase__: Optional[Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: Optional[Any] =ParquetDatasetReader(__a , cache_dir=__a , split=__a ).read() _check_parquet_dataset(__a , __a ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list] ) def lowerCAmelCase_ ( __a , __a , __a ) -> int: """simple docstring""" if issubclass(__a , __a ): lowerCamelCase__: List[Any] =parquet_path elif issubclass(__a , __a ): lowerCamelCase__: str =[parquet_path] lowerCamelCase__: Tuple =tmp_path / "cache" lowerCamelCase__: Optional[Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: int =ParquetDatasetReader(__a , cache_dir=__a ).read() _check_parquet_dataset(__a , __a ) def lowerCAmelCase_ ( __a , __a , __a=("train",) ) -> Dict: """simple docstring""" assert isinstance(__a , __a ) for split in splits: lowerCamelCase__: Tuple =dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowerCAmelCase_ ( __a , __a , __a ) -> Any: """simple docstring""" lowerCamelCase__: List[Any] =tmp_path / "cache" lowerCamelCase__: Optional[Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCamelCase__: Tuple =ParquetDatasetReader( {"train": parquet_path} , cache_dir=__a , keep_in_memory=__a ).read() _check_parquet_datasetdict(__a , __a ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowerCAmelCase_ ( __a , __a , __a ) -> Optional[Any]: """simple docstring""" lowerCamelCase__: Tuple =tmp_path / "cache" lowerCamelCase__: Optional[int] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: List[Any] =features.copy() if features else default_expected_features lowerCamelCase__: int =( Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCamelCase__: Optional[Any] =ParquetDatasetReader({"train": parquet_path} , features=__a , cache_dir=__a ).read() _check_parquet_datasetdict(__a , __a ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowerCAmelCase_ ( __a , __a , __a ) -> Union[str, Any]: """simple docstring""" if split: lowerCamelCase__: Any ={split: parquet_path} else: lowerCamelCase__: int ="train" lowerCamelCase__: Any ={"train": parquet_path, "test": parquet_path} lowerCamelCase__: str =tmp_path / "cache" lowerCamelCase__: Any ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: int =ParquetDatasetReader(__a , cache_dir=__a ).read() _check_parquet_datasetdict(__a , __a , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCAmelCase_ ( __a , __a ) -> int: """simple docstring""" lowerCamelCase__: List[str] =ParquetDatasetWriter(__a , tmp_path / "foo.parquet" ) assert writer.write() > 0 lowerCamelCase__: List[str] =pq.ParquetFile(tmp_path / "foo.parquet" ) lowerCamelCase__: List[str] =pf.read() assert dataset.data.table == output_table def lowerCAmelCase_ ( __a , __a ) -> List[str]: """simple docstring""" lowerCamelCase__: List[str] =str(shared_datadir / "test_image_rgb.jpg" ) lowerCamelCase__: Union[str, Any] ={"image": [image_path]} lowerCamelCase__: Optional[Any] =Features({"image": Image()} ) lowerCamelCase__: Optional[int] =Dataset.from_dict(__a , features=__a ) lowerCamelCase__: Optional[int] =ParquetDatasetWriter(__a , tmp_path / "foo.parquet" ) assert writer.write() > 0 lowerCamelCase__: Dict =Dataset.from_parquet(str(tmp_path / "foo.parquet" ) ) assert dataset.features == reloaded_dataset.features lowerCamelCase__: Optional[Any] =ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=__a ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( "feature, expected" , [ (Features({"foo": Value("int32" )} ), None), (Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def lowerCAmelCase_ ( __a , __a ) -> Optional[Any]: """simple docstring""" assert get_writer_batch_size(__a ) == expected
59
0
"""simple docstring""" lowerCAmelCase: Dict ={ "Pillow": "Pillow<10.0.0", "accelerate": "accelerate>=0.20.3", "av": "av==9.2.0", "beautifulsoup4": "beautifulsoup4", "black": "black~=23.1", "codecarbon": "codecarbon==1.2.0", "cookiecutter": "cookiecutter==1.7.3", "dataclasses": "dataclasses", "datasets": "datasets!=2.5.0", "decord": "decord==0.6.0", "deepspeed": "deepspeed>=0.9.3", "diffusers": "diffusers", "dill": "dill<0.3.5", "evaluate": "evaluate>=0.2.0", "fairscale": "fairscale>0.3", "faiss-cpu": "faiss-cpu", "fastapi": "fastapi", "filelock": "filelock", "flax": "flax>=0.4.1,<=0.7.0", "ftfy": "ftfy", "fugashi": "fugashi>=1.0", "GitPython": "GitPython<3.1.19", "hf-doc-builder": "hf-doc-builder>=0.3.0", "huggingface-hub": "huggingface-hub>=0.14.1,<1.0", "importlib_metadata": "importlib_metadata", "ipadic": "ipadic>=1.0.0,<2.0", "isort": "isort>=5.5.4", "jax": "jax>=0.2.8,!=0.3.2,<=0.4.13", "jaxlib": "jaxlib>=0.1.65,<=0.4.13", "jieba": "jieba", "kenlm": "kenlm", "keras-nlp": "keras-nlp>=0.3.1", "librosa": "librosa", "nltk": "nltk", "natten": "natten>=0.14.6", "numpy": "numpy>=1.17", "onnxconverter-common": "onnxconverter-common", "onnxruntime-tools": "onnxruntime-tools>=1.4.2", "onnxruntime": "onnxruntime>=1.4.0", "opencv-python": "opencv-python", "optuna": "optuna", "optax": "optax>=0.0.8,<=0.1.4", "packaging": "packaging>=20.0", "parameterized": "parameterized", "phonemizer": "phonemizer", "protobuf": "protobuf", "psutil": "psutil", "pyyaml": "pyyaml>=5.1", "pydantic": "pydantic<2", "pytest": "pytest>=7.2.0", "pytest-timeout": "pytest-timeout", "pytest-xdist": "pytest-xdist", "python": "python>=3.8.0", "ray[tune]": "ray[tune]", "regex": "regex!=2019.12.17", "requests": "requests", "rhoknp": "rhoknp>=1.1.0,<1.3.1", "rjieba": "rjieba", "rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1", "ruff": "ruff>=0.0.241,<=0.0.259", "sacrebleu": "sacrebleu>=1.4.12,<2.0.0", "sacremoses": "sacremoses", "safetensors": "safetensors>=0.3.1", "sagemaker": "sagemaker>=2.31.0", "scikit-learn": "scikit-learn", "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92", "sigopt": "sigopt", "starlette": "starlette", "sudachipy": "sudachipy>=0.6.6", "sudachidict_core": "sudachidict_core>=20220729", "tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14", "tensorflow": "tensorflow>=2.6,<2.14", "tensorflow-text": "tensorflow-text<2.14", "tf2onnx": "tf2onnx", "timeout-decorator": "timeout-decorator", "timm": "timm", "tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14", "torch": "torch>=1.9,!=1.12.0", "torchaudio": "torchaudio", "torchvision": "torchvision", "pyctcdecode": "pyctcdecode>=0.4.0", "tqdm": "tqdm>=4.27", "unidic": "unidic>=1.0.2", "unidic_lite": "unidic_lite>=1.0.7", "urllib3": "urllib3<2.0.0", "uvicorn": "uvicorn", }
607
import unittest from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __A = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' lowercase_ = XLMProphetNetTokenizer lowercase_ = False lowercase_ = True def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Optional[Any]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase__: Any =XLMProphetNetTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_) tokenizer.save_pretrained(self.tmpdirname) def SCREAMING_SNAKE_CASE_ (self : str) ->str: '''simple docstring''' lowerCamelCase__: List[Any] ="[PAD]" lowerCamelCase__: Tuple =0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_) , UpperCAmelCase_) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_) , UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Dict) ->int: '''simple docstring''' lowerCamelCase__: List[Any] =list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , "[PAD]") self.assertEqual(vocab_keys[1] , "[CLS]") self.assertEqual(vocab_keys[-1] , "j") self.assertEqual(len(UpperCAmelCase_) , 1_012) def SCREAMING_SNAKE_CASE_ (self : Dict) ->Union[str, Any]: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_012) def SCREAMING_SNAKE_CASE_ (self : Any) ->Optional[int]: '''simple docstring''' lowerCamelCase__: Optional[Any] =XLMProphetNetTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_) lowerCamelCase__: Tuple =tokenizer.tokenize("This is a test") self.assertListEqual(UpperCAmelCase_ , ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) lowerCamelCase__: Optional[Any] =tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( UpperCAmelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) lowerCamelCase__: Any =tokenizer.convert_tokens_to_ids(UpperCAmelCase_) self.assertListEqual( UpperCAmelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4] ] , ) lowerCamelCase__: Any =tokenizer.convert_ids_to_tokens(UpperCAmelCase_) self.assertListEqual( UpperCAmelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "[UNK]", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "[UNK]", ".", ] , ) @cached_property def SCREAMING_SNAKE_CASE_ (self : Any) ->int: '''simple docstring''' return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased") @slow def SCREAMING_SNAKE_CASE_ (self : List[str]) ->List[str]: '''simple docstring''' lowerCamelCase__: Optional[int] ="Hello World!" lowerCamelCase__: Dict =[35_389, 6_672, 49, 2] self.assertListEqual(UpperCAmelCase_ , self.big_tokenizer.encode(UpperCAmelCase_)) @slow def SCREAMING_SNAKE_CASE_ (self : int) ->Union[str, Any]: '''simple docstring''' lowerCamelCase__: Any ={"input_ids": [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase_ , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
59
0
A_ = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n" A_ = [{"type": "code", "content": INSTALL_CONTENT}] A_ = { "{processor_class}": "FakeProcessorClass", "{model_class}": "FakeModelClass", "{object_class}": "FakeObjectClass", }
393
import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE_ (self : List[str]) ->str: '''simple docstring''' lowerCamelCase__: Union[str, Any] ="ylacombe/bark-small" lowerCamelCase__: Tuple =tempfile.mkdtemp() lowerCamelCase__: Tuple ="en_speaker_1" lowerCamelCase__: Optional[int] ="This is a test string" lowerCamelCase__: List[str] ="speaker_embeddings_path.json" lowerCamelCase__: int ="speaker_embeddings" def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , **UpperCAmelCase_ : Any) ->Tuple: '''simple docstring''' return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Union[str, Any]: '''simple docstring''' shutil.rmtree(self.tmpdirname) def SCREAMING_SNAKE_CASE_ (self : int) ->Any: '''simple docstring''' lowerCamelCase__: List[Any] =self.get_tokenizer() lowerCamelCase__: List[str] =BarkProcessor(tokenizer=UpperCAmelCase_) processor.save_pretrained(self.tmpdirname) lowerCamelCase__: Dict =BarkProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab()) @slow def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Tuple: '''simple docstring''' lowerCamelCase__: Tuple =BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) lowerCamelCase__: Dict =self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)") lowerCamelCase__: Any =BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->int: '''simple docstring''' lowerCamelCase__: Any =BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) lowerCamelCase__: List[str] =35 lowerCamelCase__: Optional[Any] =2 lowerCamelCase__: Optional[Any] =8 lowerCamelCase__: Optional[int] ={ "semantic_prompt": np.ones(UpperCAmelCase_), "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)), "fine_prompt": np.ones((nb_codebooks_total, seq_len)), } # test providing already loaded voice_preset lowerCamelCase__: Any =processor(text=self.input_string , voice_preset=UpperCAmelCase_) lowerCamelCase__: int =inputs["history_prompt"] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([])).tolist()) # test loading voice preset from npz file lowerCamelCase__: Union[str, Any] =os.path.join(self.tmpdirname , "file.npz") np.savez(UpperCAmelCase_ , **UpperCAmelCase_) lowerCamelCase__: Tuple =processor(text=self.input_string , voice_preset=UpperCAmelCase_) lowerCamelCase__: Optional[Any] =inputs["history_prompt"] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([])).tolist()) # test loading voice preset from the hub lowerCamelCase__: Any =processor(text=self.input_string , voice_preset=self.voice_preset) def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Union[str, Any]: '''simple docstring''' lowerCamelCase__: str =self.get_tokenizer() lowerCamelCase__: Dict =BarkProcessor(tokenizer=UpperCAmelCase_) lowerCamelCase__: List[Any] =processor(text=self.input_string) lowerCamelCase__: Optional[int] =tokenizer( self.input_string , padding="max_length" , max_length=256 , add_special_tokens=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist())
59
0
'''simple docstring''' import os import unittest from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer from transformers.testing_utils import require_jieba, tooslow from ...test_tokenization_common import TokenizerTesterMixin @require_jieba class UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): """simple docstring""" UpperCamelCase_ = CpmAntTokenizer UpperCamelCase_ = False def A__ ( self : Optional[int] ) -> List[str]: '''simple docstring''' super().setUp() lowercase : Optional[int] =[ "<d>", "</d>", "<s>", "</s>", "</_>", "<unk>", "<pad>", "</n>", "我", "是", "C", "P", "M", "A", "n", "t", ] lowercase : Any =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) @tooslow def A__ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' lowercase : List[Any] =CpmAntTokenizer.from_pretrained('''openbmb/cpm-ant-10b''' ) lowercase : Union[str, Any] ="今天天气真好!" lowercase : int =["今天", "天气", "真", "好", "!"] lowercase : List[str] =tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) lowercase : Dict ="今天天气真好!" lowercase : str =[tokenizer.bos_token] + tokens lowercase : Union[str, Any] =[6, 9802, 1_4962, 2082, 831, 244] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , UpperCAmelCase_ ) lowercase : Tuple =tokenizer.decode(UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
94
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = ["image_processor", "tokenizer"] lowercase_ = "CLIPImageProcessor" lowercase_ = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast") def __init__(self : List[Any] , UpperCAmelCase_ : int=None , UpperCAmelCase_ : List[Any]=None , **UpperCAmelCase_ : List[str]) ->Optional[int]: '''simple docstring''' lowerCamelCase__: Union[str, Any] =None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , UpperCAmelCase_ , ) lowerCamelCase__: int =kwargs.pop("feature_extractor") lowerCamelCase__: int =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") super().__init__(UpperCAmelCase_ , UpperCAmelCase_) def __call__(self : List[Any] , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : int=None , **UpperCAmelCase_ : Any) ->Union[str, Any]: '''simple docstring''' if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be none.") if text is not None: lowerCamelCase__: List[Any] =self.tokenizer(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_) if images is not None: lowerCamelCase__: int =self.image_processor(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_) if text is not None and images is not None: lowerCamelCase__: str =image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCAmelCase_) , tensor_type=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[str] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[Any]) ->Dict: '''simple docstring''' return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Optional[int] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Any) ->Optional[Any]: '''simple docstring''' return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_) @property def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Optional[int]: '''simple docstring''' lowerCamelCase__: Optional[Any] =self.tokenizer.model_input_names lowerCamelCase__: str =self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
59
0
"""simple docstring""" from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class lowercase: '''simple docstring''' lowercase__ = BlenderbotSmallConfig lowercase__ = {} lowercase__ = "gelu" def __init__( self: str, a_: Dict, a_: Tuple=13, a_: str=7, a_: Tuple=True, a_: Union[str, Any]=False, a_: Optional[int]=99, a_: Optional[int]=32, a_: Any=2, a_: Dict=4, a_: Optional[int]=37, a_: str=0.1, a_: Optional[Any]=0.1, a_: List[str]=20, a_: Any=2, a_: Dict=1, a_: Tuple=0, ): '''simple docstring''' _snake_case : List[str] = parent _snake_case : Optional[int] = batch_size _snake_case : Tuple = seq_length _snake_case : Tuple = is_training _snake_case : Union[str, Any] = use_labels _snake_case : Optional[Any] = vocab_size _snake_case : Optional[int] = hidden_size _snake_case : int = num_hidden_layers _snake_case : Union[str, Any] = num_attention_heads _snake_case : Tuple = intermediate_size _snake_case : Optional[Any] = hidden_dropout_prob _snake_case : int = attention_probs_dropout_prob _snake_case : List[Any] = max_position_embeddings _snake_case : Tuple = eos_token_id _snake_case : Tuple = pad_token_id _snake_case : Optional[int] = bos_token_id def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : Any = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size ) _snake_case : List[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 ) _snake_case : List[Any] = tf.concat([input_ids, eos_tensor], axis=1 ) _snake_case : str = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) _snake_case : List[Any] = self.config_cls( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, ) _snake_case : Optional[Any] = prepare_blenderbot_small_inputs_dict(UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ ) return config, inputs_dict def UpperCamelCase_ ( self: Any, a_: Union[str, Any], a_: str ): '''simple docstring''' _snake_case : Any = TFBlenderbotSmallModel(config=UpperCAmelCase_ ).get_decoder() _snake_case : str = inputs_dict["input_ids"] _snake_case : Optional[int] = input_ids[:1, :] _snake_case : List[str] = inputs_dict["attention_mask"][:1, :] _snake_case : List[str] = inputs_dict["head_mask"] _snake_case : Tuple = 1 # first forward pass _snake_case : Union[str, Any] = model(UpperCAmelCase_, attention_mask=UpperCAmelCase_, head_mask=UpperCAmelCase_, use_cache=UpperCAmelCase_ ) _snake_case : int = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _snake_case : Dict = ids_tensor((self.batch_size, 3), config.vocab_size ) _snake_case : Optional[int] = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta ) # append to next input_ids and _snake_case : List[str] = tf.concat([input_ids, next_tokens], axis=-1 ) _snake_case : str = tf.concat([attention_mask, next_attn_mask], axis=-1 ) _snake_case : List[str] = model(UpperCAmelCase_, attention_mask=UpperCAmelCase_ )[0] _snake_case : Optional[Any] = model(UpperCAmelCase_, attention_mask=UpperCAmelCase_, past_key_values=UpperCAmelCase_ )[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] ) # select random slice _snake_case : List[Any] = int(ids_tensor((1,), output_from_past.shape[-1] ) ) _snake_case : Dict = output_from_no_past[:, -3:, random_slice_idx] _snake_case : Dict = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(UpperCAmelCase_, UpperCAmelCase_, rtol=1E-3 ) def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : int , snake_case__ : Optional[int]=None , snake_case__ : List[str]=None , snake_case__ : int=None , snake_case__ : Union[str, Any]=None , snake_case__ : Dict=None , ): """simple docstring""" if attention_mask is None: _snake_case : List[str] = tf.cast(tf.math.not_equal(__a , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: _snake_case : Any = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: _snake_case : str = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _snake_case : List[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _snake_case : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class lowercase( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' lowercase__ = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) lowercase__ = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () lowercase__ = ( { "conversational": TFBlenderbotSmallForConditionalGeneration, "feature-extraction": TFBlenderbotSmallModel, "summarization": TFBlenderbotSmallForConditionalGeneration, "text2text-generation": TFBlenderbotSmallForConditionalGeneration, "translation": TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) lowercase__ = True lowercase__ = False lowercase__ = False def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Union[str, Any] = TFBlenderbotSmallModelTester(self ) _snake_case : Optional[int] = ConfigTester(self, config_class=UpperCAmelCase_ ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*UpperCAmelCase_ ) @require_tokenizers @require_tf class lowercase( unittest.TestCase ): '''simple docstring''' lowercase__ = [ "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like " " i'm going to throw up.\nand why is that?" ] lowercase__ = "facebook/blenderbot_small-90M" @cached_property def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' return BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" ) @cached_property def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : Dict = self.tokenizer(self.src_text, return_tensors="""tf""" ) _snake_case : Optional[int] = self.model.generate( model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=UpperCAmelCase_, ) _snake_case : Any = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=UpperCAmelCase_ )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
609
from datetime import datetime import matplotlib.pyplot as plt import torch def lowerCAmelCase_ ( __a ) -> Any: """simple docstring""" for param in module.parameters(): lowerCamelCase__: Tuple =False def lowerCAmelCase_ ( ) -> Optional[int]: """simple docstring""" lowerCamelCase__: List[str] ="cuda" if torch.cuda.is_available() else "cpu" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): lowerCamelCase__: str ="mps" if device == "mps": print( "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch" " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues" " with generations." ) return device def lowerCAmelCase_ ( __a ) -> List[str]: """simple docstring""" lowerCamelCase__: Union[str, Any] =plt.imshow(__a ) fig.axes.get_xaxis().set_visible(__a ) fig.axes.get_yaxis().set_visible(__a ) plt.show() def lowerCAmelCase_ ( ) -> Optional[Any]: """simple docstring""" lowerCamelCase__: List[str] =datetime.now() lowerCamelCase__: str =current_time.strftime("%H:%M:%S" ) return timestamp
59
0
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 __lowerCamelCase : Any = get_tests_dir('''fixtures''') __lowerCamelCase : Any = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''') __lowerCamelCase : Optional[int] = get_tests_dir('''fixtures/dummy-config.json''') class a__ ( unittest.TestCase ): def __UpperCamelCase ( self : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = 0 def __UpperCamelCase ( self : Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h" ) self.assertIsInstance(UpperCAmelCase_,UpperCAmelCase_ ) def __UpperCamelCase ( self : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = AutoFeatureExtractor.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_,UpperCAmelCase_ ) def __UpperCamelCase ( self : int ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE_ : List[Any] = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally SCREAMING_SNAKE_CASE_ : Optional[int] = AutoFeatureExtractor.from_pretrained(UpperCAmelCase_ ).to_dict() config_dict.pop("feature_extractor_type" ) SCREAMING_SNAKE_CASE_ : List[str] = WavaVecaFeatureExtractor(**UpperCAmelCase_ ) # save in new folder model_config.save_pretrained(UpperCAmelCase_ ) config.save_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE_ : Dict = AutoFeatureExtractor.from_pretrained(UpperCAmelCase_ ) # make sure private variable is not incorrectly saved SCREAMING_SNAKE_CASE_ : str = json.loads(config.to_json_string() ) self.assertTrue("_processor_class" not in dict_as_saved ) self.assertIsInstance(UpperCAmelCase_,UpperCAmelCase_ ) def __UpperCamelCase ( self : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = AutoFeatureExtractor.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_,UpperCAmelCase_ ) def __UpperCamelCase ( self : Any ): """simple docstring""" with self.assertRaisesRegex( UpperCAmelCase_,"bert-base is not a local folder and is not a valid model identifier" ): SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoFeatureExtractor.from_pretrained("bert-base" ) def __UpperCamelCase ( self : int ): """simple docstring""" with self.assertRaisesRegex( UpperCAmelCase_,R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): SCREAMING_SNAKE_CASE_ : List[Any] = AutoFeatureExtractor.from_pretrained(UpperCAmelCase_,revision="aaaaaa" ) def __UpperCamelCase ( self : Any ): """simple docstring""" with self.assertRaisesRegex( UpperCAmelCase_,"hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",): SCREAMING_SNAKE_CASE_ : int = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model" ) def __UpperCamelCase ( self : str ): """simple docstring""" with self.assertRaises(UpperCAmelCase_ ): SCREAMING_SNAKE_CASE_ : List[str] = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" ) # If remote code is disabled, we can't load this config. with self.assertRaises(UpperCAmelCase_ ): SCREAMING_SNAKE_CASE_ : List[str] = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor",trust_remote_code=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE_ : List[str] = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor",trust_remote_code=UpperCAmelCase_ ) self.assertEqual(feature_extractor.__class__.__name__,"NewFeatureExtractor" ) # Test feature extractor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE_ : int = AutoFeatureExtractor.from_pretrained(UpperCAmelCase_,trust_remote_code=UpperCAmelCase_ ) self.assertEqual(reloaded_feature_extractor.__class__.__name__,"NewFeatureExtractor" ) def __UpperCamelCase ( self : List[str] ): """simple docstring""" try: AutoConfig.register("custom",UpperCAmelCase_ ) AutoFeatureExtractor.register(UpperCAmelCase_,UpperCAmelCase_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(UpperCAmelCase_ ): AutoFeatureExtractor.register(UpperCAmelCase_,UpperCAmelCase_ ) # Now that the config is registered, it can be used as any other config with the auto-API SCREAMING_SNAKE_CASE_ : Dict = CustomFeatureExtractor.from_pretrained(UpperCAmelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoFeatureExtractor.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_,UpperCAmelCase_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def __UpperCamelCase ( self : Any ): """simple docstring""" class a__ ( __SCREAMING_SNAKE_CASE ): A = True try: AutoConfig.register("custom",UpperCAmelCase_ ) AutoFeatureExtractor.register(UpperCAmelCase_,UpperCAmelCase_ ) # If remote code is not set, the default is to use local SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" ) self.assertEqual(feature_extractor.__class__.__name__,"NewFeatureExtractor" ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. SCREAMING_SNAKE_CASE_ : Dict = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor",trust_remote_code=UpperCAmelCase_ ) self.assertEqual(feature_extractor.__class__.__name__,"NewFeatureExtractor" ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub SCREAMING_SNAKE_CASE_ : List[Any] = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor",trust_remote_code=UpperCAmelCase_ ) self.assertEqual(feature_extractor.__class__.__name__,"NewFeatureExtractor" ) self.assertTrue(not hasattr(UpperCAmelCase_,"is_local" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
216
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __A = { "configuration_pix2struct": [ "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Pix2StructConfig", "Pix2StructTextConfig", "Pix2StructVisionConfig", ], "processing_pix2struct": ["Pix2StructProcessor"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["Pix2StructImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST", "Pix2StructPreTrainedModel", "Pix2StructForConditionalGeneration", "Pix2StructVisionModel", "Pix2StructTextModel", ] if TYPE_CHECKING: from .configuration_pixastruct import ( PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP, PixaStructConfig, PixaStructTextConfig, PixaStructVisionConfig, ) from .processing_pixastruct import PixaStructProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_pixastruct import PixaStructImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pixastruct import ( PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST, PixaStructForConditionalGeneration, PixaStructPreTrainedModel, PixaStructTextModel, PixaStructVisionModel, ) else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
59
0
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging A = logging.get_logger(__name__) A = """▁""" A = {"""vocab_file""": """sentencepiece.bpe.model"""} A = { """vocab_file""": { """facebook/mbart-large-en-ro""": ( """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model""" ), """facebook/mbart-large-cc25""": ( """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model""" ), } } A = { """facebook/mbart-large-en-ro""": 1_024, """facebook/mbart-large-cc25""": 1_024, } # fmt: off A = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""] class a__ ( __SCREAMING_SNAKE_CASE ): lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = ["input_ids", "attention_mask"] lowercase_ = [] lowercase_ = [] def __init__( self : Union[str, Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int]="<s>" , UpperCamelCase_ : Tuple="</s>" , UpperCamelCase_ : Tuple="</s>" , UpperCamelCase_ : Union[str, Any]="<s>" , UpperCamelCase_ : List[str]="<unk>" , UpperCamelCase_ : Union[str, Any]="<pad>" , UpperCamelCase_ : Union[str, Any]="<mask>" , UpperCamelCase_ : Any=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : str=None , UpperCamelCase_ : Optional[Dict[str, Any]] = None , UpperCamelCase_ : Dict=None , **UpperCamelCase_ : Dict , ): """simple docstring""" __UpperCAmelCase : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else mask_token __UpperCAmelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , src_lang=UpperCAmelCase_ , tgt_lang=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , ) __UpperCAmelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(UpperCAmelCase_)) __UpperCAmelCase : int = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token __UpperCAmelCase : List[str] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab __UpperCAmelCase : Optional[int] = 1 __UpperCAmelCase : Optional[Any] = len(self.sp_model) __UpperCAmelCase : int = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(UpperCAmelCase_) } __UpperCAmelCase : int = {v: k for k, v in self.lang_code_to_id.items()} __UpperCAmelCase : str = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id) __UpperCAmelCase : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} __UpperCAmelCase : Optional[Any] = list(self.lang_code_to_id.keys()) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens]) __UpperCAmelCase : Optional[int] = src_lang if src_lang is not None else "en_XX" __UpperCAmelCase : int = self.lang_code_to_id[self._src_lang] __UpperCAmelCase : Dict = tgt_lang self.set_src_lang_special_tokens(self._src_lang) def __getstate__( self : str): """simple docstring""" __UpperCAmelCase : str = self.__dict__.copy() __UpperCAmelCase : Optional[Any] = None __UpperCAmelCase : List[Any] = self.sp_model.serialized_model_proto() return state def __setstate__( self : List[Any] , UpperCamelCase_ : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Tuple = d # for backward compatibility if not hasattr(self , "sp_model_kwargs"): __UpperCAmelCase : Dict = {} __UpperCAmelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.LoadFromSerializedProto(self.sp_model_proto) @property def a_ ( self : str): """simple docstring""" return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def a_ ( self : List[Any]): """simple docstring""" return self._src_lang @src_lang.setter def a_ ( self : int , UpperCamelCase_ : str): """simple docstring""" __UpperCAmelCase : List[str] = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def a_ ( self : Tuple , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_) __UpperCAmelCase : Any = [1] * len(self.prefix_tokens) __UpperCAmelCase : Tuple = [1] * len(self.suffix_tokens) if token_ids_a is None: return prefix_ones + ([0] * len(UpperCAmelCase_)) + suffix_ones return prefix_ones + ([0] * len(UpperCAmelCase_)) + ([0] * len(UpperCAmelCase_)) + suffix_ones def a_ ( self : Any , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None): """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def a_ ( self : Optional[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None): """simple docstring""" __UpperCAmelCase : int = [self.sep_token_id] __UpperCAmelCase : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def a_ ( self : Optional[int] , UpperCamelCase_ : Dict , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] , UpperCamelCase_ : Optional[str] , **UpperCamelCase_ : Any): """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model") __UpperCAmelCase : Any = src_lang __UpperCAmelCase : str = self(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_) __UpperCAmelCase : Optional[Any] = self.convert_tokens_to_ids(UpperCAmelCase_) __UpperCAmelCase : List[Any] = tgt_lang_id return inputs def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : str = {self.convert_ids_to_tokens(UpperCAmelCase_): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def a_ ( self : List[Any] , UpperCamelCase_ : str): """simple docstring""" return self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_) def a_ ( self : Dict , UpperCamelCase_ : List[str]): """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __UpperCAmelCase : Any = self.sp_model.PieceToId(UpperCAmelCase_) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def a_ ( self : Dict , UpperCamelCase_ : Dict): """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) def a_ ( self : Tuple , UpperCamelCase_ : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Union[str, Any] = "".join(UpperCAmelCase_).replace(UpperCAmelCase_ , " ").strip() return out_string def a_ ( self : int , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None): """simple docstring""" if not os.path.isdir(UpperCAmelCase_): logger.error(F"Vocabulary path ({save_directory}) should be a directory") return __UpperCAmelCase : str = os.path.join( UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCAmelCase_) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , UpperCAmelCase_) elif not os.path.isfile(self.vocab_file): with open(UpperCAmelCase_ , "wb") as fi: __UpperCAmelCase : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase_) return (out_vocab_file,) def a_ ( self : str , UpperCamelCase_ : List[str] , UpperCamelCase_ : str = "en_XX" , UpperCamelCase_ : Optional[List[str]] = None , UpperCamelCase_ : str = "ro_RO" , **UpperCamelCase_ : List[str] , ): """simple docstring""" __UpperCAmelCase : Optional[int] = src_lang __UpperCAmelCase : Optional[Any] = tgt_lang return super().prepare_seqaseq_batch(UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_) def a_ ( self : str): """simple docstring""" return self.set_src_lang_special_tokens(self.src_lang) def a_ ( self : str): """simple docstring""" return self.set_tgt_lang_special_tokens(self.tgt_lang) def a_ ( self : Optional[int] , UpperCamelCase_ : str): """simple docstring""" __UpperCAmelCase : Any = self.lang_code_to_id[src_lang] __UpperCAmelCase : Tuple = [] __UpperCAmelCase : Union[str, Any] = [self.eos_token_id, self.cur_lang_code] def a_ ( self : List[str] , UpperCamelCase_ : str): """simple docstring""" __UpperCAmelCase : str = self.lang_code_to_id[lang] __UpperCAmelCase : int = [] __UpperCAmelCase : int = [self.eos_token_id, self.cur_lang_code]
77
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer __A = logging.get_logger(__name__) __A = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} __A = { "vocab_file": { "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt", "distilbert-base-uncased-distilled-squad": ( "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt" ), "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt", "distilbert-base-cased-distilled-squad": ( "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt" ), "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt", "distilbert-base-multilingual-cased": ( "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt" ), }, "tokenizer_file": { "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json", "distilbert-base-uncased-distilled-squad": ( "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json" ), "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json", "distilbert-base-cased-distilled-squad": ( "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json" ), "distilbert-base-german-cased": ( "https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json" ), "distilbert-base-multilingual-cased": ( "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json" ), }, } __A = { "distilbert-base-uncased": 512, "distilbert-base-uncased-distilled-squad": 512, "distilbert-base-cased": 512, "distilbert-base-cased-distilled-squad": 512, "distilbert-base-german-cased": 512, "distilbert-base-multilingual-cased": 512, } __A = { "distilbert-base-uncased": {"do_lower_case": True}, "distilbert-base-uncased-distilled-squad": {"do_lower_case": True}, "distilbert-base-cased": {"do_lower_case": False}, "distilbert-base-cased-distilled-squad": {"do_lower_case": False}, "distilbert-base-german-cased": {"do_lower_case": False}, "distilbert-base-multilingual-cased": {"do_lower_case": False}, } class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = PRETRAINED_INIT_CONFIGURATION lowercase_ = ["input_ids", "attention_mask"] lowercase_ = DistilBertTokenizer def __init__(self : Tuple , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[Any]="[UNK]" , UpperCAmelCase_ : Dict="[SEP]" , UpperCAmelCase_ : Dict="[PAD]" , UpperCAmelCase_ : Optional[int]="[CLS]" , UpperCAmelCase_ : str="[MASK]" , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[str]=None , **UpperCAmelCase_ : List[str] , ) ->str: '''simple docstring''' super().__init__( UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , tokenize_chinese_chars=UpperCAmelCase_ , strip_accents=UpperCAmelCase_ , **UpperCAmelCase_ , ) lowerCamelCase__: Union[str, Any] =json.loads(self.backend_tokenizer.normalizer.__getstate__()) if ( normalizer_state.get("lowercase" , UpperCAmelCase_) != do_lower_case or normalizer_state.get("strip_accents" , UpperCAmelCase_) != strip_accents or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase_) != tokenize_chinese_chars ): lowerCamelCase__: List[str] =getattr(UpperCAmelCase_ , normalizer_state.pop("type")) lowerCamelCase__: Optional[int] =do_lower_case lowerCamelCase__: int =strip_accents lowerCamelCase__: Any =tokenize_chinese_chars lowerCamelCase__: Any =normalizer_class(**UpperCAmelCase_) lowerCamelCase__: str =do_lower_case def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any]=None) ->Dict: '''simple docstring''' lowerCamelCase__: str =[self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None) ->List[int]: '''simple docstring''' lowerCamelCase__: str =[self.sep_token_id] lowerCamelCase__: str =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None) ->Tuple[str]: '''simple docstring''' lowerCamelCase__: str =self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_) return tuple(UpperCAmelCase_)
59
0
"""simple docstring""" import warnings from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase__ = logging.get_logger(__name__) lowercase__ = { """nvidia/segformer-b0-finetuned-ade-512-512""": ( """https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json""" ), # See all SegFormer models at https://huggingface.co/models?filter=segformer } class lowerCAmelCase__ ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowerCamelCase__ = """segformer""" def __init__( self , lowercase=3 , lowercase=4 , lowercase=[2, 2, 2, 2] , lowercase=[8, 4, 2, 1] , lowercase=[32, 64, 160, 256] , lowercase=[7, 3, 3, 3] , lowercase=[4, 2, 2, 2] , lowercase=[1, 2, 5, 8] , lowercase=[4, 4, 4, 4] , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.1 , lowercase=0.02 , lowercase=0.1 , lowercase=1E-6 , lowercase=256 , lowercase=255 , **lowercase , ): super().__init__(**UpperCAmelCase_ ) if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False: warnings.warn( 'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be' ' removed, as the behaviour will default to that of reshape_last_stage = True.' , UpperCAmelCase_ , ) _lowerCamelCase : Union[str, Any] = num_channels _lowerCamelCase : Optional[Any] = num_encoder_blocks _lowerCamelCase : List[Any] = depths _lowerCamelCase : str = sr_ratios _lowerCamelCase : Dict = hidden_sizes _lowerCamelCase : Dict = patch_sizes _lowerCamelCase : str = strides _lowerCamelCase : Dict = mlp_ratios _lowerCamelCase : Dict = num_attention_heads _lowerCamelCase : List[Any] = hidden_act _lowerCamelCase : int = hidden_dropout_prob _lowerCamelCase : Optional[Any] = attention_probs_dropout_prob _lowerCamelCase : Union[str, Any] = classifier_dropout_prob _lowerCamelCase : List[Any] = initializer_range _lowerCamelCase : Tuple = drop_path_rate _lowerCamelCase : Optional[int] = layer_norm_eps _lowerCamelCase : int = decoder_hidden_size _lowerCamelCase : Tuple = kwargs.get('reshape_last_stage' , UpperCAmelCase_ ) _lowerCamelCase : List[str] = semantic_loss_ignore_index class lowerCAmelCase__ ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowerCamelCase__ = version.parse("""1.11""" ) @property def A_ ( self ): return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def A_ ( self ): return 1E-4 @property def A_ ( self ): return 12
630
import operator as op def lowerCAmelCase_ ( __a ) -> Tuple: """simple docstring""" lowerCamelCase__: Optional[Any] =[] lowerCamelCase__: Tuple =lambda __a , __a : int(x / y ) # noqa: E731 integer division operation lowerCamelCase__: Tuple ={ "^": op.pow, "*": op.mul, "/": div, "+": op.add, "-": op.sub, } # operators & their respective operation # print table header print("Symbol".center(8 ) , "Action".center(12 ) , "Stack" , sep=" | " ) print("-" * (30 + len(__a )) ) for x in post_fix: if x.isdigit(): # if x in digit stack.append(__a ) # append x to stack # output in tabular format print(x.rjust(8 ) , ("push(" + x + ")").ljust(12 ) , ",".join(__a ) , sep=" | " ) else: lowerCamelCase__: List[Any] =stack.pop() # pop stack # output in tabular format print("".rjust(8 ) , ("pop(" + b + ")").ljust(12 ) , ",".join(__a ) , sep=" | " ) lowerCamelCase__: Optional[Any] =stack.pop() # pop stack # output in tabular format print("".rjust(8 ) , ("pop(" + a + ")").ljust(12 ) , ",".join(__a ) , sep=" | " ) stack.append( str(opr[x](int(__a ) , int(__a ) ) ) ) # evaluate the 2 values popped from stack & push result to stack # output in tabular format print( x.rjust(8 ) , ("push(" + a + x + b + ")").ljust(12 ) , ",".join(__a ) , sep=" | " , ) return int(stack[0] ) if __name__ == "__main__": __A = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ") print("\n\tResult = ", solve(Postfix))
59
0
import argparse from collections import defaultdict def lowerCamelCase ( a_ , a_ , a_ , a_ , a_ ) -> Optional[int]: lowerCAmelCase_ = F'''{file}_{class_name}_{test_name}''' done_test[_id] += 1 with open(__a , 'r' ) as f: lowerCAmelCase_ = f.readlines() lowerCAmelCase_ = F'''class {class_name}(''' lowerCAmelCase_ = F'''{4 * " "}def {test_name}(''' lowerCAmelCase_ = F'''{8 * " "}{correct_line.split()[0]}''' lowerCAmelCase_ = F'''{16 * " "}{correct_line.split()[0]}''' lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = 0 lowerCAmelCase_ = 0 lowerCAmelCase_ = [] for line in lines: if line.startswith(__a ): lowerCAmelCase_ = True elif in_class and line.startswith(__a ): lowerCAmelCase_ = True elif in_class and in_func and (line.startswith(__a ) or line.startswith(__a )): lowerCAmelCase_ = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: lowerCAmelCase_ = True if in_class and in_func and in_line: if ")" not in line: continue else: lowerCAmelCase_ = True if in_class and in_func and in_line and insert_line: new_lines.append(F'''{spaces * " "}{correct_line}''' ) lowerCAmelCase_ = False else: new_lines.append(__a ) with open(__a , 'w' ) as f: for line in new_lines: f.write(__a ) def lowerCamelCase ( a_ , a_=None ) -> int: if fail is not None: with open(__a , 'r' ) as f: lowerCAmelCase_ = {l.strip() for l in f.readlines()} else: lowerCAmelCase_ = None with open(__a , 'r' ) as f: lowerCAmelCase_ = f.readlines() lowerCAmelCase_ = defaultdict(__a ) for line in correct_lines: lowerCAmelCase_ = line.split(';' ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(__a , __a , __a , __a , __a ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() parser.add_argument("""--correct_filename""", help="""filename of tests with expected result""") parser.add_argument("""--fail_filename""", help="""filename of test failures""", type=str, default=None) lowerCamelCase_ = parser.parse_args() main(args.correct_filename, args.fail_filename)
318
from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING __A = logging.get_logger(__name__) @add_end_docstrings(__SCREAMING_SNAKE_CASE ) class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__(self : List[Any] , **UpperCAmelCase_ : Any) ->Any: '''simple docstring''' super().__init__(**UpperCAmelCase_) requires_backends(self , "vision") requires_backends(self , "torch") if self.framework != "pt": raise ValueError(F"""The {self.__class__} is only available in PyTorch.""") self.check_model_type(UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Tuple , **UpperCAmelCase_ : List[Any]) ->Tuple: '''simple docstring''' lowerCamelCase__: Optional[int] ={} lowerCamelCase__: Tuple ={} lowerCamelCase__: str ={} # preprocess args if "points_per_batch" in kwargs: lowerCamelCase__: Optional[Any] =kwargs["points_per_batch"] if "points_per_crop" in kwargs: lowerCamelCase__: int =kwargs["points_per_crop"] if "crops_n_layers" in kwargs: lowerCamelCase__: Any =kwargs["crops_n_layers"] if "crop_overlap_ratio" in kwargs: lowerCamelCase__: Tuple =kwargs["crop_overlap_ratio"] if "crop_n_points_downscale_factor" in kwargs: lowerCamelCase__: List[Any] =kwargs["crop_n_points_downscale_factor"] # postprocess args if "pred_iou_thresh" in kwargs: lowerCamelCase__: List[str] =kwargs["pred_iou_thresh"] if "stability_score_offset" in kwargs: lowerCamelCase__: int =kwargs["stability_score_offset"] if "mask_threshold" in kwargs: lowerCamelCase__: Optional[int] =kwargs["mask_threshold"] if "stability_score_thresh" in kwargs: lowerCamelCase__: str =kwargs["stability_score_thresh"] if "crops_nms_thresh" in kwargs: lowerCamelCase__: Any =kwargs["crops_nms_thresh"] if "output_rle_mask" in kwargs: lowerCamelCase__: List[Any] =kwargs["output_rle_mask"] if "output_bboxes_mask" in kwargs: lowerCamelCase__: List[str] =kwargs["output_bboxes_mask"] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__(self : int , UpperCAmelCase_ : Dict , *UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Optional[Any]=None , **UpperCAmelCase_ : Dict) ->Optional[Any]: '''simple docstring''' return super().__call__(UpperCAmelCase_ , *UpperCAmelCase_ , num_workers=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , **UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any]=64 , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : float = 512 / 1_500 , UpperCAmelCase_ : Optional[int] = 32 , UpperCAmelCase_ : Optional[int] = 1 , ) ->Dict: '''simple docstring''' lowerCamelCase__: Dict =load_image(UpperCAmelCase_) lowerCamelCase__: List[str] =self.image_processor.size["longest_edge"] lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Union[str, Any] =self.image_processor.generate_crop_boxes( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: str =self.image_processor(images=UpperCAmelCase_ , return_tensors="pt") with self.device_placement(): if self.framework == "pt": lowerCamelCase__: str =self.get_inference_context() with inference_context(): lowerCamelCase__: Union[str, Any] =self._ensure_tensor_on_device(UpperCAmelCase_ , device=self.device) lowerCamelCase__: Optional[Any] =self.model.get_image_embeddings(model_inputs.pop("pixel_values")) lowerCamelCase__: str =image_embeddings lowerCamelCase__: int =grid_points.shape[1] lowerCamelCase__: int =points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. " "To return all points at once, set points_per_batch to None") for i in range(0 , UpperCAmelCase_ , UpperCAmelCase_): lowerCamelCase__: int =grid_points[:, i : i + points_per_batch, :, :] lowerCamelCase__: Optional[Any] =input_labels[:, i : i + points_per_batch] lowerCamelCase__: Dict =i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict=0.88 , UpperCAmelCase_ : Optional[Any]=0.95 , UpperCAmelCase_ : Tuple=0 , UpperCAmelCase_ : Any=1 , ) ->Optional[Any]: '''simple docstring''' lowerCamelCase__: Any =model_inputs.pop("input_boxes") lowerCamelCase__: Dict =model_inputs.pop("is_last") lowerCamelCase__: int =model_inputs.pop("original_sizes").tolist() lowerCamelCase__: Union[str, Any] =model_inputs.pop("reshaped_input_sizes").tolist() lowerCamelCase__: Union[str, Any] =self.model(**UpperCAmelCase_) # post processing happens here in order to avoid CPU GPU copies of ALL the masks lowerCamelCase__: Optional[int] =model_outputs["pred_masks"] lowerCamelCase__: Union[str, Any] =self.image_processor.post_process_masks( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , binarize=UpperCAmelCase_) lowerCamelCase__: Optional[Any] =model_outputs["iou_scores"] lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Any =self.image_processor.filter_masks( masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Optional[int]=0.7 , ) ->Tuple: '''simple docstring''' lowerCamelCase__: Any =[] lowerCamelCase__: Optional[int] =[] lowerCamelCase__: List[str] =[] for model_output in model_outputs: all_scores.append(model_output.pop("iou_scores")) all_masks.extend(model_output.pop("masks")) all_boxes.append(model_output.pop("boxes")) lowerCamelCase__: str =torch.cat(UpperCAmelCase_) lowerCamelCase__: List[str] =torch.cat(UpperCAmelCase_) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Dict =self.image_processor.post_process_for_mask_generation( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: List[str] =defaultdict(UpperCAmelCase_) for output in model_outputs: for k, v in output.items(): extra[k].append(UpperCAmelCase_) lowerCamelCase__: Any ={} if output_rle_mask: lowerCamelCase__: Union[str, Any] =rle_mask if output_bboxes_mask: lowerCamelCase__: int =bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
59
0
"""simple docstring""" def A_ ( snake_case_ : Optional[int] ): '''simple docstring''' if upper_limit < 0: raise ValueError("""Limit for the Catalan sequence must be ≥ 0""" ) UpperCamelCase : List[str] = [0] * (upper_limit + 1) # Base case: C(0) = C(1) = 1 UpperCamelCase : List[Any] = 1 if upper_limit > 0: UpperCamelCase : Tuple = 1 # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i for i in range(2 ,upper_limit + 1 ): for j in range(__a ): catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1] return catalan_list if __name__ == "__main__": print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''') print('''\n*** Enter -1 at any time to quit ***''') print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''') try: while True: __A : Union[str, Any] = int(input().strip()) if N < 0: print('''\n********* Goodbye!! ************''') break else: print(F'''The Catalan numbers from 0 through {N} are:''') print(catalan_numbers(N)) print('''Try another upper limit for the sequence: ''', end='''''') except (NameError, ValueError): print('''\n********* Invalid input, goodbye! ************\n''') import doctest doctest.testmod()
499
from transformers import BertTokenizerFast from .custom_tokenization import CustomTokenizer class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = CustomTokenizer pass
59
0
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING __UpperCamelCase : Any = logging.get_logger(__name__) __UpperCamelCase : Union[str, Any] = { '''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''', } class a ( __SCREAMING_SNAKE_CASE ): snake_case__ = '''deta''' snake_case__ = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', } def __init__( self , _snake_case=None , _snake_case=9_00 , _snake_case=20_48 , _snake_case=6 , _snake_case=20_48 , _snake_case=8 , _snake_case=6 , _snake_case=10_24 , _snake_case=8 , _snake_case=0.0 , _snake_case=True , _snake_case="relu" , _snake_case=2_56 , _snake_case=0.1 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=1.0 , _snake_case=True , _snake_case=False , _snake_case="sine" , _snake_case=5 , _snake_case=4 , _snake_case=4 , _snake_case=True , _snake_case=3_00 , _snake_case=True , _snake_case=True , _snake_case=1 , _snake_case=5 , _snake_case=2 , _snake_case=1 , _snake_case=1 , _snake_case=5 , _snake_case=2 , _snake_case=0.1 , _snake_case=0.25 , **_snake_case , ): """simple docstring""" if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' ) lowerCAmelCase = CONFIG_MAPPING["resnet"](out_features=['stage2', 'stage3', 'stage4'] ) else: if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): lowerCAmelCase = backbone_config.pop('model_type' ) lowerCAmelCase = CONFIG_MAPPING[backbone_model_type] lowerCAmelCase = config_class.from_dict(UpperCAmelCase_ ) lowerCAmelCase = backbone_config lowerCAmelCase = num_queries lowerCAmelCase = max_position_embeddings lowerCAmelCase = d_model lowerCAmelCase = encoder_ffn_dim lowerCAmelCase = encoder_layers lowerCAmelCase = encoder_attention_heads lowerCAmelCase = decoder_ffn_dim lowerCAmelCase = decoder_layers lowerCAmelCase = decoder_attention_heads lowerCAmelCase = dropout lowerCAmelCase = attention_dropout lowerCAmelCase = activation_dropout lowerCAmelCase = activation_function lowerCAmelCase = init_std lowerCAmelCase = init_xavier_std lowerCAmelCase = encoder_layerdrop lowerCAmelCase = auxiliary_loss lowerCAmelCase = position_embedding_type # deformable attributes lowerCAmelCase = num_feature_levels lowerCAmelCase = encoder_n_points lowerCAmelCase = decoder_n_points lowerCAmelCase = two_stage lowerCAmelCase = two_stage_num_proposals lowerCAmelCase = with_box_refine lowerCAmelCase = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError('If two_stage is True, with_box_refine must be True.' ) # Hungarian matcher lowerCAmelCase = class_cost lowerCAmelCase = bbox_cost lowerCAmelCase = giou_cost # Loss coefficients lowerCAmelCase = mask_loss_coefficient lowerCAmelCase = dice_loss_coefficient lowerCAmelCase = bbox_loss_coefficient lowerCAmelCase = giou_loss_coefficient lowerCAmelCase = eos_coefficient lowerCAmelCase = focal_alpha super().__init__(is_encoder_decoder=UpperCAmelCase_ , **UpperCAmelCase_ ) @property def UpperCamelCase__ ( self ): """simple docstring""" return self.encoder_attention_heads @property def UpperCamelCase__ ( self ): """simple docstring""" return self.d_model def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = copy.deepcopy(self.__dict__ ) lowerCAmelCase = self.backbone_config.to_dict() lowerCAmelCase = self.__class__.model_type return output
4
import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE_ (self : Dict) ->Optional[int]: '''simple docstring''' lowerCamelCase__: List[Any] =inspect.getfile(accelerate.test_utils) lowerCamelCase__: List[Any] =os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"]) lowerCamelCase__: Any =os.path.sep.join( mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]) lowerCamelCase__: Tuple =os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"]) @require_multi_gpu def SCREAMING_SNAKE_CASE_ (self : str) ->str: '''simple docstring''' print(F"""Found {torch.cuda.device_count()} devices.""") lowerCamelCase__: Union[str, Any] =["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path] with patch_environment(omp_num_threads=1): execute_subprocess_async(UpperCAmelCase_ , env=os.environ.copy()) @require_multi_gpu def SCREAMING_SNAKE_CASE_ (self : List[str]) ->List[Any]: '''simple docstring''' print(F"""Found {torch.cuda.device_count()} devices.""") lowerCamelCase__: Dict =["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path] print(F"""Command: {cmd}""") with patch_environment(omp_num_threads=1): execute_subprocess_async(UpperCAmelCase_ , env=os.environ.copy()) @require_multi_gpu def SCREAMING_SNAKE_CASE_ (self : Dict) ->Tuple: '''simple docstring''' lowerCamelCase__: int =["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__)] with patch_environment(omp_num_threads=1): execute_subprocess_async(UpperCAmelCase_ , env=os.environ.copy()) @require_multi_gpu def SCREAMING_SNAKE_CASE_ (self : str) ->List[Any]: '''simple docstring''' print(F"""Found {torch.cuda.device_count()} devices, using 2 devices only""") lowerCamelCase__: int =["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1"): execute_subprocess_async(UpperCAmelCase_ , env=os.environ.copy()) if __name__ == "__main__": __A = Accelerator() __A = (accelerator.state.process_index + 2, 10) __A = torch.randint(0, 10, shape).to(accelerator.device) __A = "" __A = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." __A = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." __A = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
59
0
from ...configuration_utils import PretrainedConfig from ...utils import logging __A =logging.get_logger(__name__) __A ={ '''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''', # See all Cvt models at https://huggingface.co/models?filter=cvt } class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): lowerCAmelCase__ = 'cvt' def __init__( self , lowercase=3 , lowercase=[7, 3, 3] , lowercase=[4, 2, 2] , lowercase=[2, 1, 1] , lowercase=[64, 192, 384] , lowercase=[1, 3, 6] , lowercase=[1, 2, 10] , lowercase=[4.0, 4.0, 4.0] , lowercase=[0.0, 0.0, 0.0] , lowercase=[0.0, 0.0, 0.0] , lowercase=[0.0, 0.0, 0.1] , lowercase=[True, True, True] , lowercase=[False, False, True] , lowercase=["dw_bn", "dw_bn", "dw_bn"] , lowercase=[3, 3, 3] , lowercase=[1, 1, 1] , lowercase=[2, 2, 2] , lowercase=[1, 1, 1] , lowercase=[1, 1, 1] , lowercase=0.0_2 , lowercase=1e-12 , **lowercase , ) -> str: super().__init__(**UpperCAmelCase_ ) lowerCamelCase_ = num_channels lowerCamelCase_ = patch_sizes lowerCamelCase_ = patch_stride lowerCamelCase_ = patch_padding lowerCamelCase_ = embed_dim lowerCamelCase_ = num_heads lowerCamelCase_ = depth lowerCamelCase_ = mlp_ratio lowerCamelCase_ = attention_drop_rate lowerCamelCase_ = drop_rate lowerCamelCase_ = drop_path_rate lowerCamelCase_ = qkv_bias lowerCamelCase_ = cls_token lowerCamelCase_ = qkv_projection_method lowerCamelCase_ = kernel_qkv lowerCamelCase_ = padding_kv lowerCamelCase_ = stride_kv lowerCamelCase_ = padding_q lowerCamelCase_ = stride_q lowerCamelCase_ = initializer_range lowerCamelCase_ = layer_norm_eps
463
from typing import List, Optional, Tuple, Union import PIL import torch from torchvision import transforms from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput from diffusers.schedulers import DDIMScheduler from diffusers.utils import randn_tensor __A = transforms.Compose( [ transforms.Resize((256, 256)), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def lowerCAmelCase_ ( __a ) -> str: """simple docstring""" if isinstance(__a , torch.Tensor ): return image elif isinstance(__a , PIL.Image.Image ): lowerCamelCase__: Any =[image] lowerCamelCase__: Optional[Any] =[trans(img.convert("RGB" ) ) for img in image] lowerCamelCase__: Dict =torch.stack(__a ) return image class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__(self : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple) ->int: '''simple docstring''' super().__init__() # make sure scheduler can always be converted to DDIM lowerCamelCase__: Tuple =DDIMScheduler.from_config(scheduler.config) self.register_modules(unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Union[str, Any]) ->Dict: '''simple docstring''' if strength < 0 or strength > 1: raise ValueError(F"""The value of strength should in [0.0, 1.0] but is {strength}""") def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple) ->Tuple: '''simple docstring''' lowerCamelCase__: int =min(int(num_inference_steps * strength) , UpperCAmelCase_) lowerCamelCase__: str =max(num_inference_steps - init_timestep , 0) lowerCamelCase__: int =self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int]=None) ->Optional[int]: '''simple docstring''' if not isinstance(UpperCAmelCase_ , (torch.Tensor, PIL.Image.Image, list)): raise ValueError( F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(UpperCAmelCase_)}""") lowerCamelCase__: Optional[int] =image.to(device=UpperCAmelCase_ , dtype=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) and len(UpperCAmelCase_) != batch_size: raise ValueError( F"""You have passed a list of generators of length {len(UpperCAmelCase_)}, but requested an effective batch""" F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""") lowerCamelCase__: Dict =init_latents.shape lowerCamelCase__: int =randn_tensor(UpperCAmelCase_ , generator=UpperCAmelCase_ , device=UpperCAmelCase_ , dtype=UpperCAmelCase_) # get latents print("add noise to latents at timestep" , UpperCAmelCase_) lowerCamelCase__: Union[str, Any] =self.scheduler.add_noise(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: int =init_latents return latents @torch.no_grad() def __call__(self : Tuple , UpperCAmelCase_ : Union[torch.FloatTensor, PIL.Image.Image] = None , UpperCAmelCase_ : float = 0.8 , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : int = 50 , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , ) ->Union[ImagePipelineOutput, Tuple]: '''simple docstring''' self.check_inputs(UpperCAmelCase_) # 2. Preprocess image lowerCamelCase__: Dict =preprocess(UpperCAmelCase_) # 3. set timesteps self.scheduler.set_timesteps(UpperCAmelCase_ , device=self.device) lowerCamelCase__ , lowerCamelCase__: str =self.get_timesteps(UpperCAmelCase_ , UpperCAmelCase_ , self.device) lowerCamelCase__: Optional[int] =timesteps[:1].repeat(UpperCAmelCase_) # 4. Prepare latent variables lowerCamelCase__: int =self.prepare_latents(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , self.unet.dtype , self.device , UpperCAmelCase_) lowerCamelCase__: Tuple =latents # 5. Denoising loop for t in self.progress_bar(UpperCAmelCase_): # 1. predict noise model_output lowerCamelCase__: Dict =self.unet(UpperCAmelCase_ , UpperCAmelCase_).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 lowerCamelCase__: Optional[int] =self.scheduler.step( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , eta=UpperCAmelCase_ , use_clipped_model_output=UpperCAmelCase_ , generator=UpperCAmelCase_ , ).prev_sample lowerCamelCase__: str =(image / 2 + 0.5).clamp(0 , 1) lowerCamelCase__: Optional[Any] =image.cpu().permute(0 , 2 , 3 , 1).numpy() if output_type == "pil": lowerCamelCase__: Dict =self.numpy_to_pil(UpperCAmelCase_) if not return_dict: return (image, latent_timestep.item()) return ImagePipelineOutput(images=UpperCAmelCase_)
59
0
"""simple docstring""" import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class lowerCamelCase__ ( unittest.TestCase ): def __init__( self , snake_case , snake_case=7 , snake_case=3 , snake_case=1_8 , snake_case=3_0 , snake_case=4_0_0 , snake_case=True , snake_case=None , snake_case=True , ) -> Tuple: """simple docstring""" lowercase : Any = size if size is not None else {"height": 1_8, "width": 1_8} lowercase : Tuple = parent lowercase : Dict = batch_size lowercase : Optional[Any] = num_channels lowercase : Union[str, Any] = image_size lowercase : Dict = min_resolution lowercase : int = max_resolution lowercase : Tuple = do_resize lowercase : Tuple = size lowercase : Dict = do_normalize def _UpperCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04], [-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class lowerCamelCase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): __UpperCAmelCase = ImageGPTImageProcessor if is_vision_available() else None def _UpperCAmelCase ( self ) -> Any: """simple docstring""" lowercase : int = ImageGPTImageProcessingTester(self ) @property def _UpperCAmelCase ( self ) -> Dict: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _UpperCAmelCase ( self ) -> List[Any]: """simple docstring""" lowercase : List[str] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase_ , """clusters""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ , """do_resize""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ , """size""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ , """do_normalize""" ) ) def _UpperCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" lowercase : Any = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8} ) lowercase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 ) self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2} ) def _UpperCAmelCase ( self ) -> Any: """simple docstring""" lowercase : List[Any] = self.image_processing_class(**self.image_processor_dict ) lowercase : str = json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(UpperCAmelCase_ , obj[key] ) ) else: self.assertEqual(obj[key] , UpperCAmelCase_ ) def _UpperCAmelCase ( self ) -> str: """simple docstring""" lowercase : List[str] = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: lowercase : Any = os.path.join(UpperCAmelCase_ , """image_processor.json""" ) image_processor_first.to_json_file(UpperCAmelCase_ ) lowercase : Tuple = self.image_processing_class.from_json_file(UpperCAmelCase_ ).to_dict() lowercase : Optional[Any] = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(UpperCAmelCase_ , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , UpperCAmelCase_ ) def _UpperCAmelCase ( self ) -> Dict: """simple docstring""" lowercase : List[Any] = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(UpperCAmelCase_ ) lowercase : Optional[Any] = self.image_processing_class.from_pretrained(UpperCAmelCase_ ).to_dict() lowercase : Tuple = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(UpperCAmelCase_ , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , UpperCAmelCase_ ) @unittest.skip("""ImageGPT requires clusters at initialization""" ) def _UpperCAmelCase ( self ) -> int: """simple docstring""" pass def __snake_case ( ) -> Dict: lowercase : Optional[int] = load_dataset("""hf-internal-testing/fixtures_image_utils""" ,split="""test""" ) lowercase : Union[str, Any] = Image.open(dataset[4]["""file"""] ) lowercase : int = Image.open(dataset[5]["""file"""] ) lowercase : List[Any] = [imagea, imagea] return images @require_vision @require_torch class lowerCamelCase__ ( unittest.TestCase ): @slow def _UpperCAmelCase ( self ) -> Optional[int]: """simple docstring""" lowercase : List[str] = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" ) lowercase : List[str] = prepare_images() # test non-batched lowercase : Optional[int] = image_processing(images[0] , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4) ) lowercase : Optional[int] = [3_0_6, 1_9_1, 1_9_1] self.assertEqual(encoding.input_ids[0, :3].tolist() , UpperCAmelCase_ ) # test batched lowercase : str = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4) ) lowercase : Union[str, Any] = [3_0_3, 1_3, 1_3] self.assertEqual(encoding.input_ids[1, -3:].tolist() , UpperCAmelCase_ )
607
import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 __A = data_utils.TransfoXLTokenizer __A = data_utils.TransfoXLCorpus __A = data_utils __A = data_utils def lowerCAmelCase_ ( __a , __a , __a , __a ) -> List[str]: """simple docstring""" if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(__a , "rb" ) as fp: lowerCamelCase__: Optional[Any] =pickle.load(__a , encoding="latin1" ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) lowerCamelCase__: Union[str, Any] =pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"] print(F"""Save vocabulary to {pytorch_vocab_dump_path}""" ) lowerCamelCase__: Any =corpus.vocab.__dict__ torch.save(__a , __a ) lowerCamelCase__: Dict =corpus.__dict__ corpus_dict_no_vocab.pop("vocab" , __a ) lowerCamelCase__: List[str] =pytorch_dump_folder_path + "/" + CORPUS_NAME print(F"""Save dataset to {pytorch_dataset_dump_path}""" ) torch.save(__a , __a ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model lowerCamelCase__: Optional[Any] =os.path.abspath(__a ) lowerCamelCase__: Dict =os.path.abspath(__a ) print(F"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" ) # Initialise PyTorch model if transfo_xl_config_file == "": lowerCamelCase__: int =TransfoXLConfig() else: lowerCamelCase__: Any =TransfoXLConfig.from_json_file(__a ) print(F"""Building PyTorch model from configuration: {config}""" ) lowerCamelCase__: List[Any] =TransfoXLLMHeadModel(__a ) lowerCamelCase__: List[str] =load_tf_weights_in_transfo_xl(__a , __a , __a ) # Save pytorch-model lowerCamelCase__: List[str] =os.path.join(__a , __a ) lowerCamelCase__: Tuple =os.path.join(__a , __a ) print(F"""Save PyTorch model to {os.path.abspath(__a )}""" ) torch.save(model.state_dict() , __a ) print(F"""Save configuration file to {os.path.abspath(__a )}""" ) with open(__a , "w" , encoding="utf-8" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the folder to store the PyTorch model or dataset/vocab.", ) parser.add_argument( "--tf_checkpoint_path", default="", type=str, help="An optional path to a TensorFlow checkpoint path to be converted.", ) parser.add_argument( "--transfo_xl_config_file", default="", type=str, help=( "An optional config json file corresponding to the pre-trained BERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--transfo_xl_dataset_file", default="", type=str, help="An optional dataset file to be converted in a vocabulary.", ) __A = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
59
0
from __future__ import annotations from math import pi def UpperCAmelCase ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase )-> dict[str, float]: '''simple docstring''' if (inductance, frequency, reactance).count(0 ) != 1: raise ValueError('''One and only one argument must be 0''' ) if inductance < 0: raise ValueError('''Inductance cannot be negative''' ) if frequency < 0: raise ValueError('''Frequency cannot be negative''' ) if reactance < 0: raise ValueError('''Inductive reactance cannot be negative''' ) if inductance == 0: return {"inductance": reactance / (2 * pi * frequency)} elif frequency == 0: return {"frequency": reactance / (2 * pi * inductance)} elif reactance == 0: return {"reactance": 2 * pi * frequency * inductance} else: raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
393
from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax __A = logging.get_logger(__name__) @add_end_docstrings(__SCREAMING_SNAKE_CASE ) class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__(self : Optional[int] , **UpperCAmelCase_ : List[Any]) ->List[str]: '''simple docstring''' super().__init__(**UpperCAmelCase_) requires_backends(self , "vision") self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == "tf" else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING) def __call__(self : List[str] , UpperCAmelCase_ : Union[str, List[str], "Image", List["Image"]] , **UpperCAmelCase_ : List[Any]) ->Tuple: '''simple docstring''' return super().__call__(UpperCAmelCase_ , **UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[Any] , **UpperCAmelCase_ : Optional[int]) ->Any: '''simple docstring''' lowerCamelCase__: Optional[int] ={} if "candidate_labels" in kwargs: lowerCamelCase__: Tuple =kwargs["candidate_labels"] if "hypothesis_template" in kwargs: lowerCamelCase__: Tuple =kwargs["hypothesis_template"] return preprocess_params, {}, {} def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Optional[Any]="This is a photo of {}.") ->str: '''simple docstring''' lowerCamelCase__: int =load_image(UpperCAmelCase_) lowerCamelCase__: Any =self.image_processor(images=[image] , return_tensors=self.framework) lowerCamelCase__: Any =candidate_labels lowerCamelCase__: List[str] =[hypothesis_template.format(UpperCAmelCase_) for x in candidate_labels] lowerCamelCase__: int =self.tokenizer(UpperCAmelCase_ , return_tensors=self.framework , padding=UpperCAmelCase_) lowerCamelCase__: str =[text_inputs] return inputs def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Any) ->Tuple: '''simple docstring''' lowerCamelCase__: int =model_inputs.pop("candidate_labels") lowerCamelCase__: List[str] =model_inputs.pop("text_inputs") if isinstance(text_inputs[0] , UpperCAmelCase_): lowerCamelCase__: List[Any] =text_inputs[0] else: # Batching case. lowerCamelCase__: List[Any] =text_inputs[0][0] lowerCamelCase__: List[str] =self.model(**UpperCAmelCase_ , **UpperCAmelCase_) lowerCamelCase__: str ={ "candidate_labels": candidate_labels, "logits": outputs.logits_per_image, } return model_outputs def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : Union[str, Any]) ->int: '''simple docstring''' lowerCamelCase__: List[Any] =model_outputs.pop("candidate_labels") lowerCamelCase__: Optional[int] =model_outputs["logits"][0] if self.framework == "pt": lowerCamelCase__: Optional[Any] =logits.softmax(dim=-1).squeeze(-1) lowerCamelCase__: Optional[Any] =probs.tolist() if not isinstance(UpperCAmelCase_ , UpperCAmelCase_): lowerCamelCase__: Optional[int] =[scores] elif self.framework == "tf": lowerCamelCase__: List[str] =stable_softmax(UpperCAmelCase_ , axis=-1) lowerCamelCase__: Optional[int] =probs.numpy().tolist() else: raise ValueError(F"""Unsupported framework: {self.framework}""") lowerCamelCase__: Optional[int] =[ {"score": score, "label": candidate_label} for score, candidate_label in sorted(zip(UpperCAmelCase_ , UpperCAmelCase_) , key=lambda UpperCAmelCase_: -x[0]) ] return result
59
0
'''simple docstring''' import unittest from transformers import DonutProcessor SCREAMING_SNAKE_CASE = 'naver-clova-ix/donut-base' class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" def A__ ( self : List[Any] ) -> str: '''simple docstring''' lowercase : int =DonutProcessor.from_pretrained(UpperCAmelCase_ ) def A__ ( self : Tuple ) -> str: '''simple docstring''' lowercase : Any ={ "name": "John Doe", "age": "99", "city": "Atlanta", "state": "GA", "zip": "30301", "phone": "123-4567", "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}], } lowercase : Tuple =( "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>" "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>" "<s_nicknames><s_nickname>Johnny</s_nickname>" "<sep/><s_nickname>JD</s_nickname></s_nicknames>" ) lowercase : Optional[int] =self.processor.tokenajson(UpperCAmelCase_ ) self.assertDictEqual(UpperCAmelCase_ , UpperCAmelCase_ )
94
import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = 42 lowercase_ = jnp.floataa lowercase_ = True def SCREAMING_SNAKE_CASE_ (self : Any) ->List[str]: '''simple docstring''' super().setup() lowerCamelCase__: int =nn.Dense(5 , dtype=self.dtype) def __call__(self : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Any) ->Optional[int]: '''simple docstring''' lowerCamelCase__: Optional[Any] =super().__call__(*UpperCAmelCase_ , **UpperCAmelCase_) lowerCamelCase__: int =self.cls(outputs[2]) return outputs[:2] + (cls_out,) class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = FlaxBigBirdForNaturalQuestionsModule def lowerCAmelCase_ ( __a , __a , __a , __a , __a , __a ) -> Tuple: """simple docstring""" def cross_entropy(__a , __a , __a=None ): lowerCamelCase__: Tuple =logits.shape[-1] lowerCamelCase__: Tuple =(labels[..., None] == jnp.arange(__a )[None]).astype("f4" ) lowerCamelCase__: str =jax.nn.log_softmax(__a , axis=-1 ) lowerCamelCase__: Optional[Any] =-jnp.sum(labels * logits , axis=-1 ) if reduction is not None: lowerCamelCase__: Optional[Any] =reduction(__a ) return loss lowerCamelCase__: str =partial(__a , reduction=jnp.mean ) lowerCamelCase__: str =cross_entropy(__a , __a ) lowerCamelCase__: Optional[int] =cross_entropy(__a , __a ) lowerCamelCase__: Optional[Any] =cross_entropy(__a , __a ) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class _SCREAMING_SNAKE_CASE : '''simple docstring''' lowercase_ = "google/bigbird-roberta-base" lowercase_ = 3000 lowercase_ = 1_0500 lowercase_ = 128 lowercase_ = 3 lowercase_ = 1 lowercase_ = 5 # tx_args lowercase_ = 3E-5 lowercase_ = 0.0 lowercase_ = 2_0000 lowercase_ = 0.0095 lowercase_ = "bigbird-roberta-natural-questions" lowercase_ = "training-expt" lowercase_ = "data/nq-training.jsonl" lowercase_ = "data/nq-validation.jsonl" def SCREAMING_SNAKE_CASE_ (self : Tuple) ->List[str]: '''simple docstring''' os.makedirs(self.base_dir , exist_ok=UpperCAmelCase_) lowerCamelCase__: Optional[Any] =os.path.join(self.base_dir , self.save_dir) lowerCamelCase__: List[str] =self.batch_size_per_device * jax.device_count() @dataclass class _SCREAMING_SNAKE_CASE : '''simple docstring''' lowercase_ = 42 lowercase_ = 4096 # no dynamic padding on TPUs def __call__(self : List[Any] , UpperCAmelCase_ : Optional[Any]) ->Optional[int]: '''simple docstring''' lowerCamelCase__: Optional[Any] =self.collate_fn(UpperCAmelCase_) lowerCamelCase__: List[Any] =jax.tree_util.tree_map(UpperCAmelCase_ , UpperCAmelCase_) return batch def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : List[str]) ->List[Any]: '''simple docstring''' lowerCamelCase__ , lowerCamelCase__: List[Any] =self.fetch_inputs(features["input_ids"]) lowerCamelCase__: Union[str, Any] ={ "input_ids": jnp.array(UpperCAmelCase_ , dtype=jnp.intaa), "attention_mask": jnp.array(UpperCAmelCase_ , dtype=jnp.intaa), "start_labels": jnp.array(features["start_token"] , dtype=jnp.intaa), "end_labels": jnp.array(features["end_token"] , dtype=jnp.intaa), "pooled_labels": jnp.array(features["category"] , dtype=jnp.intaa), } return batch def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : list) ->Optional[Any]: '''simple docstring''' lowerCamelCase__: Tuple =[self._fetch_inputs(UpperCAmelCase_) for ids in input_ids] return zip(*UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : list) ->Any: '''simple docstring''' lowerCamelCase__: Optional[Any] =[1 for _ in range(len(UpperCAmelCase_))] while len(UpperCAmelCase_) < self.max_length: input_ids.append(self.pad_id) attention_mask.append(0) return input_ids, attention_mask def lowerCAmelCase_ ( __a , __a , __a=None ) -> str: """simple docstring""" if seed is not None: lowerCamelCase__: Any =dataset.shuffle(seed=__a ) for i in range(len(__a ) // batch_size ): lowerCamelCase__: Any =dataset[i * batch_size : (i + 1) * batch_size] yield dict(__a ) @partial(jax.pmap , axis_name="batch" ) def lowerCAmelCase_ ( __a , __a , **__a ) -> List[str]: """simple docstring""" def loss_fn(__a ): lowerCamelCase__: Optional[int] =model_inputs.pop("start_labels" ) lowerCamelCase__: int =model_inputs.pop("end_labels" ) lowerCamelCase__: List[str] =model_inputs.pop("pooled_labels" ) lowerCamelCase__: Optional[int] =state.apply_fn(**__a , params=__a , dropout_rng=__a , train=__a ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: List[Any] =outputs return state.loss_fn( __a , __a , __a , __a , __a , __a , ) lowerCamelCase__ , lowerCamelCase__: int =jax.random.split(__a ) lowerCamelCase__: Optional[Any] =jax.value_and_grad(__a ) lowerCamelCase__ , lowerCamelCase__: List[str] =grad_fn(state.params ) lowerCamelCase__: Optional[Any] =jax.lax.pmean({"loss": loss} , axis_name="batch" ) lowerCamelCase__: List[str] =jax.lax.pmean(__a , "batch" ) lowerCamelCase__: List[str] =state.apply_gradients(grads=__a ) return state, metrics, new_drp_rng @partial(jax.pmap , axis_name="batch" ) def lowerCAmelCase_ ( __a , **__a ) -> List[Any]: """simple docstring""" lowerCamelCase__: int =model_inputs.pop("start_labels" ) lowerCamelCase__: List[str] =model_inputs.pop("end_labels" ) lowerCamelCase__: int =model_inputs.pop("pooled_labels" ) lowerCamelCase__: Optional[Any] =state.apply_fn(**__a , params=state.params , train=__a ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: List[str] =outputs lowerCamelCase__: Optional[int] =state.loss_fn(__a , __a , __a , __a , __a , __a ) lowerCamelCase__: Optional[Any] =jax.lax.pmean({"loss": loss} , axis_name="batch" ) return metrics class _SCREAMING_SNAKE_CASE ( train_state.TrainState ): '''simple docstring''' lowercase_ = struct.field(pytree_node=__SCREAMING_SNAKE_CASE ) @dataclass class _SCREAMING_SNAKE_CASE : '''simple docstring''' lowercase_ = 42 lowercase_ = 42 lowercase_ = 42 lowercase_ = 42 lowercase_ = 42 lowercase_ = 42 lowercase_ = None def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int=None) ->Optional[int]: '''simple docstring''' lowerCamelCase__: Dict =model.params lowerCamelCase__: Tuple =TrainState.create( apply_fn=model.__call__ , params=UpperCAmelCase_ , tx=UpperCAmelCase_ , loss_fn=UpperCAmelCase_ , ) if ckpt_dir is not None: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Any =restore_checkpoint(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: Tuple ={ "lr": args.lr, "init_lr": args.init_lr, "warmup_steps": args.warmup_steps, "num_train_steps": num_train_steps, "weight_decay": args.weight_decay, } lowerCamelCase__ , lowerCamelCase__: List[Any] =build_tx(**UpperCAmelCase_) lowerCamelCase__: str =train_state.TrainState( step=UpperCAmelCase_ , apply_fn=model.__call__ , params=UpperCAmelCase_ , tx=UpperCAmelCase_ , opt_state=UpperCAmelCase_ , ) lowerCamelCase__: Tuple =args lowerCamelCase__: Tuple =data_collator lowerCamelCase__: str =lr lowerCamelCase__: Dict =params lowerCamelCase__: List[str] =jax_utils.replicate(UpperCAmelCase_) return state def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple) ->Optional[Any]: '''simple docstring''' lowerCamelCase__: Tuple =self.args lowerCamelCase__: Any =len(UpperCAmelCase_) // args.batch_size lowerCamelCase__: List[str] =jax.random.PRNGKey(0) lowerCamelCase__: Optional[Any] =jax.random.split(UpperCAmelCase_ , jax.device_count()) for epoch in range(args.max_epochs): lowerCamelCase__: Union[str, Any] =jnp.array(0 , dtype=jnp.floataa) lowerCamelCase__: str =get_batched_dataset(UpperCAmelCase_ , args.batch_size , seed=UpperCAmelCase_) lowerCamelCase__: Dict =0 for batch in tqdm(UpperCAmelCase_ , total=UpperCAmelCase_ , desc=F"""Running EPOCH-{epoch}"""): lowerCamelCase__: List[str] =self.data_collator(UpperCAmelCase_) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[int] =self.train_step_fn(UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_) running_loss += jax_utils.unreplicate(metrics["loss"]) i += 1 if i % args.logging_steps == 0: lowerCamelCase__: Optional[int] =jax_utils.unreplicate(state.step) lowerCamelCase__: List[Any] =running_loss.item() / i lowerCamelCase__: Tuple =self.scheduler_fn(state_step - 1) lowerCamelCase__: Union[str, Any] =self.evaluate(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: Dict ={ "step": state_step.item(), "eval_loss": eval_loss.item(), "tr_loss": tr_loss, "lr": lr.item(), } tqdm.write(str(UpperCAmelCase_)) self.logger.log(UpperCAmelCase_ , commit=UpperCAmelCase_) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + F"""-e{epoch}-s{i}""" , state=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : str) ->Any: '''simple docstring''' lowerCamelCase__: List[Any] =get_batched_dataset(UpperCAmelCase_ , self.args.batch_size) lowerCamelCase__: List[str] =len(UpperCAmelCase_) // self.args.batch_size lowerCamelCase__: str =jnp.array(0 , dtype=jnp.floataa) lowerCamelCase__: Optional[Any] =0 for batch in tqdm(UpperCAmelCase_ , total=UpperCAmelCase_ , desc="Evaluating ... "): lowerCamelCase__: int =self.data_collator(UpperCAmelCase_) lowerCamelCase__: str =self.val_step_fn(UpperCAmelCase_ , **UpperCAmelCase_) running_loss += jax_utils.unreplicate(metrics["loss"]) i += 1 return running_loss / i def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int]) ->int: '''simple docstring''' lowerCamelCase__: Any =jax_utils.unreplicate(UpperCAmelCase_) print(F"""SAVING CHECKPOINT IN {save_dir}""" , end=" ... ") self.model_save_fn(UpperCAmelCase_ , params=state.params) with open(os.path.join(UpperCAmelCase_ , "opt_state.msgpack") , "wb") as f: f.write(to_bytes(state.opt_state)) joblib.dump(self.args , os.path.join(UpperCAmelCase_ , "args.joblib")) joblib.dump(self.data_collator , os.path.join(UpperCAmelCase_ , "data_collator.joblib")) with open(os.path.join(UpperCAmelCase_ , "training_state.json") , "w") as f: json.dump({"step": state.step.item()} , UpperCAmelCase_) print("DONE") def lowerCAmelCase_ ( __a , __a ) -> str: """simple docstring""" print(F"""RESTORING CHECKPOINT FROM {save_dir}""" , end=" ... " ) with open(os.path.join(__a , "flax_model.msgpack" ) , "rb" ) as f: lowerCamelCase__: Tuple =from_bytes(state.params , f.read() ) with open(os.path.join(__a , "opt_state.msgpack" ) , "rb" ) as f: lowerCamelCase__: Optional[int] =from_bytes(state.opt_state , f.read() ) lowerCamelCase__: Any =joblib.load(os.path.join(__a , "args.joblib" ) ) lowerCamelCase__: Union[str, Any] =joblib.load(os.path.join(__a , "data_collator.joblib" ) ) with open(os.path.join(__a , "training_state.json" ) , "r" ) as f: lowerCamelCase__: Optional[Any] =json.load(__a ) lowerCamelCase__: Any =training_state["step"] print("DONE" ) return params, opt_state, step, args, data_collator def lowerCAmelCase_ ( __a , __a , __a , __a ) -> Optional[int]: """simple docstring""" lowerCamelCase__: int =num_train_steps - warmup_steps lowerCamelCase__: str =optax.linear_schedule(init_value=__a , end_value=__a , transition_steps=__a ) lowerCamelCase__: Optional[Any] =optax.linear_schedule(init_value=__a , end_value=1e-7 , transition_steps=__a ) lowerCamelCase__: List[Any] =optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] ) return lr def lowerCAmelCase_ ( __a , __a , __a , __a , __a ) -> str: """simple docstring""" def weight_decay_mask(__a ): lowerCamelCase__: List[str] =traverse_util.flatten_dict(__a ) lowerCamelCase__: List[str] ={k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()} return traverse_util.unflatten_dict(__a ) lowerCamelCase__: Optional[Any] =scheduler_fn(__a , __a , __a , __a ) lowerCamelCase__: Tuple =optax.adamw(learning_rate=__a , weight_decay=__a , mask=__a ) return tx, lr
59
0
"""simple docstring""" from ..utils import DummyObject, requires_backends class lowercase( metaclass=__SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase__ = ["note_seq"] def __init__( self: List[str], *a_: Optional[Any], **a_: Union[str, Any] ): '''simple docstring''' requires_backends(self, ["""note_seq"""] ) @classmethod def UpperCamelCase_ ( cls: str, *a_: List[str], **a_: Any ): '''simple docstring''' requires_backends(cls, ["""note_seq"""] ) @classmethod def UpperCamelCase_ ( cls: Tuple, *a_: List[Any], **a_: Dict ): '''simple docstring''' requires_backends(cls, ["""note_seq"""] )
609
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = ["image_processor", "tokenizer"] lowercase_ = "ChineseCLIPImageProcessor" lowercase_ = ("BertTokenizer", "BertTokenizerFast") def __init__(self : Any , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Union[str, Any]=None , **UpperCAmelCase_ : str) ->Dict: '''simple docstring''' lowerCamelCase__: str =None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , UpperCAmelCase_ , ) lowerCamelCase__: Tuple =kwargs.pop("feature_extractor") lowerCamelCase__: Optional[int] =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") super().__init__(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: Optional[int] =self.image_processor def __call__(self : int , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : List[Any]=None , **UpperCAmelCase_ : Dict) ->Optional[int]: '''simple docstring''' if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be none.") if text is not None: lowerCamelCase__: Dict =self.tokenizer(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_) if images is not None: lowerCamelCase__: List[str] =self.image_processor(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_) if text is not None and images is not None: lowerCamelCase__: Union[str, Any] =image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCAmelCase_) , tensor_type=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int) ->str: '''simple docstring''' return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Any , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[Any]) ->Dict: '''simple docstring''' return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_) @property def SCREAMING_SNAKE_CASE_ (self : int) ->List[str]: '''simple docstring''' lowerCamelCase__: str =self.tokenizer.model_input_names lowerCamelCase__: Union[str, Any] =self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) @property def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->str: '''simple docstring''' warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCAmelCase_ , ) return self.image_processor_class
59
0
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class a__ ( unittest.TestCase ): @slow def __UpperCamelCase ( self : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" ) SCREAMING_SNAKE_CASE_ : Any = tf.convert_to_tensor( [[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]],dtype=tf.intaa,) # J'aime le camembert !" SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(UpperCAmelCase_ )["last_hidden_state"] SCREAMING_SNAKE_CASE_ : Optional[int] = tf.TensorShape((1, 10, 768) ) self.assertEqual(output.shape,UpperCAmelCase_ ) # compare the actual values for a slice. SCREAMING_SNAKE_CASE_ : str = tf.convert_to_tensor( [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],dtype=tf.floataa,) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy(),expected_slice.numpy(),atol=1E-4 ) )
216
from math import ceil, sqrt def lowerCAmelCase_ ( __a = 1000000 ) -> int: """simple docstring""" lowerCamelCase__: Any =0 for outer_width in range(3 , (limit // 4) + 2 ): if outer_width**2 > limit: lowerCamelCase__: Optional[int] =max(ceil(sqrt(outer_width**2 - limit ) ) , 1 ) else: lowerCamelCase__: Tuple =1 if (outer_width - hole_width_lower_bound) % 2: hole_width_lower_bound += 1 answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1 return answer if __name__ == "__main__": print(f'{solution() = }')
59
0
"""simple docstring""" import random def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int: """simple docstring""" __UpperCAmelCase : List[Any] = a[left_index] __UpperCAmelCase : Optional[int] = left_index + 1 for j in range(left_index + 1 , __a ): if a[j] < pivot: __UpperCAmelCase : Tuple = a[i], a[j] i += 1 __UpperCAmelCase : Union[str, Any] = a[i - 1], a[left_index] return i - 1 def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int: """simple docstring""" if left < right: __UpperCAmelCase : Dict = random.randint(__a , right - 1 ) __UpperCAmelCase : Any = ( a[left], a[pivot], ) # switches the pivot with the left most bound __UpperCAmelCase : Any = partition(__a , __a , __a ) quick_sort_random( __a , __a , __a ) # recursive quicksort to the left of the pivot point quick_sort_random( __a , pivot_index + 1 , __a ) # recursive quicksort to the right of the pivot point def _UpperCamelCase ( ) -> Optional[Any]: """simple docstring""" __UpperCAmelCase : List[Any] = input("Enter numbers separated by a comma:\n" ).strip() __UpperCAmelCase : Optional[Any] = [int(__a ) for item in user_input.split("," )] quick_sort_random(__a , 0 , len(__a ) ) print(__a ) if __name__ == "__main__": main()
77
def lowerCAmelCase_ ( __a = 50000000 ) -> int: """simple docstring""" lowerCamelCase__: Any =set() lowerCamelCase__: int =int((limit - 24) ** (1 / 2) ) lowerCamelCase__: Tuple =set(range(3 , prime_square_limit + 1 , 2 ) ) primes.add(2 ) for p in range(3 , prime_square_limit + 1 , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , prime_square_limit + 1 , __a ) ) ) for primea in primes: lowerCamelCase__: Optional[int] =primea * primea for primea in primes: lowerCamelCase__: List[str] =primea * primea * primea if square + cube >= limit - 16: break for primea in primes: lowerCamelCase__: int =primea * primea * primea * primea lowerCamelCase__: Optional[Any] =square + cube + tetr if total >= limit: break ret.add(__a ) return len(__a ) if __name__ == "__main__": print(f'{solution() = }')
59
0
"""simple docstring""" import warnings from ...utils import logging from .image_processing_layoutlmva import LayoutLMvaImageProcessor lowercase__ = logging.get_logger(__name__) class lowerCAmelCase__ ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self , *lowercase , **lowercase ): warnings.warn( 'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use LayoutLMv2ImageProcessor instead.' , UpperCAmelCase_ , ) super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
630
from __future__ import annotations from decimal import Decimal from math import * # noqa: F403 from sympy import diff def lowerCAmelCase_ ( __a , __a , __a = 10**-10 ) -> float: """simple docstring""" lowerCamelCase__: List[str] =a while True: lowerCamelCase__: Optional[Any] =Decimal(__a ) - ( Decimal(eval(__a ) ) / Decimal(eval(str(diff(__a ) ) ) ) # noqa: S307 ) # This number dictates the accuracy of the answer if abs(eval(__a ) ) < precision: # noqa: S307 return float(__a ) # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}') # Find root of polynomial print(f'The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}') # Find Square Root of 5 print(f'The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}') # Exponential Roots print(f'The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}')
59
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase_ = { """configuration_mobilebert""": [ """MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MobileBertConfig""", """MobileBertOnnxConfig""", ], """tokenization_mobilebert""": ["""MobileBertTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = ["""MobileBertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ """MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """MobileBertForMaskedLM""", """MobileBertForMultipleChoice""", """MobileBertForNextSentencePrediction""", """MobileBertForPreTraining""", """MobileBertForQuestionAnswering""", """MobileBertForSequenceClassification""", """MobileBertForTokenClassification""", """MobileBertLayer""", """MobileBertModel""", """MobileBertPreTrainedModel""", """load_tf_weights_in_mobilebert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ """TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFMobileBertForMaskedLM""", """TFMobileBertForMultipleChoice""", """TFMobileBertForNextSentencePrediction""", """TFMobileBertForPreTraining""", """TFMobileBertForQuestionAnswering""", """TFMobileBertForSequenceClassification""", """TFMobileBertForTokenClassification""", """TFMobileBertMainLayer""", """TFMobileBertModel""", """TFMobileBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mobilebert import ( MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileBertConfig, MobileBertOnnxConfig, ) from .tokenization_mobilebert import MobileBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mobilebert_fast import MobileBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilebert import ( MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertLayer, MobileBertModel, MobileBertPreTrainedModel, load_tf_weights_in_mobilebert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilebert import ( TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertMainLayer, TFMobileBertModel, TFMobileBertPreTrainedModel, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
318
import numpy as np from numpy import ndarray from scipy.optimize import Bounds, LinearConstraint, minimize def lowerCAmelCase_ ( __a ) -> float: """simple docstring""" return np.dot(__a , __a ) class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__(self : List[str] , *, UpperCAmelCase_ : float = np.inf , UpperCAmelCase_ : str = "linear" , UpperCAmelCase_ : float = 0.0 , ) ->None: '''simple docstring''' lowerCamelCase__: Dict =regularization lowerCamelCase__: Any =gamma if kernel == "linear": lowerCamelCase__: Dict =self.__linear elif kernel == "rbf": if self.gamma == 0: raise ValueError("rbf kernel requires gamma") if not isinstance(self.gamma , (float, int)): raise ValueError("gamma must be float or int") if not self.gamma > 0: raise ValueError("gamma must be > 0") lowerCamelCase__: Tuple =self.__rbf # in the future, there could be a default value like in sklearn # sklear: def_gamma = 1/(n_features * X.var()) (wiki) # previously it was 1/(n_features) else: lowerCamelCase__: Optional[Any] =F"""Unknown kernel: {kernel}""" raise ValueError(UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : ndarray , UpperCAmelCase_ : ndarray) ->float: '''simple docstring''' return np.dot(UpperCAmelCase_ , UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : ndarray , UpperCAmelCase_ : ndarray) ->float: '''simple docstring''' return np.exp(-(self.gamma * norm_squared(vectora - vectora))) def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : list[ndarray] , UpperCAmelCase_ : ndarray) ->None: '''simple docstring''' lowerCamelCase__: Optional[Any] =observations lowerCamelCase__: Optional[int] =classes # using Wolfe's Dual to calculate w. # Primal problem: minimize 1/2*norm_squared(w) # constraint: yn(w . xn + b) >= 1 # # With l a vector # Dual problem: maximize sum_n(ln) - # 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm)) # constraint: self.C >= ln >= 0 # and sum_n(ln*yn) = 0 # Then we get w using w = sum_n(ln*yn*xn) # At the end we can get b ~= mean(yn - w . xn) # # Since we use kernels, we only need l_star to calculate b # and to classify observations ((lowerCamelCase__) , ): List[str] =np.shape(UpperCAmelCase_) def to_minimize(UpperCAmelCase_ : ndarray) -> float: lowerCamelCase__: int =0 ((lowerCamelCase__) , ): Optional[Any] =np.shape(UpperCAmelCase_) for i in range(UpperCAmelCase_): for j in range(UpperCAmelCase_): s += ( candidate[i] * candidate[j] * classes[i] * classes[j] * self.kernel(observations[i] , observations[j]) ) return 1 / 2 * s - sum(UpperCAmelCase_) lowerCamelCase__: List[Any] =LinearConstraint(UpperCAmelCase_ , 0 , 0) lowerCamelCase__: str =Bounds(0 , self.regularization) lowerCamelCase__: Union[str, Any] =minimize( UpperCAmelCase_ , np.ones(UpperCAmelCase_) , bounds=UpperCAmelCase_ , constraints=[ly_contraint]).x lowerCamelCase__: str =l_star # calculating mean offset of separation plane to points lowerCamelCase__: Tuple =0 for i in range(UpperCAmelCase_): for j in range(UpperCAmelCase_): s += classes[i] - classes[i] * self.optimum[i] * self.kernel( observations[i] , observations[j]) lowerCamelCase__: int =s / n def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : ndarray) ->int: '''simple docstring''' lowerCamelCase__: Optional[Any] =sum( self.optimum[n] * self.classes[n] * self.kernel(self.observations[n] , UpperCAmelCase_) for n in range(len(self.classes))) return 1 if s + self.offset >= 0 else -1 if __name__ == "__main__": import doctest doctest.testmod()
59
0
"""simple docstring""" import numpy as np from transformers import Pipeline def A_ ( snake_case_ : Union[str, Any] ): '''simple docstring''' UpperCamelCase : Optional[Any] = np.max(__a ,axis=-1 ,keepdims=__a ) UpperCamelCase : int = np.exp(outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 ,keepdims=__a ) class lowerCamelCase ( __SCREAMING_SNAKE_CASE ): def a_ ( self , **SCREAMING_SNAKE_CASE_ ): UpperCamelCase : Union[str, Any] = {} if "second_text" in kwargs: UpperCamelCase : Optional[int] = kwargs["second_text"] return preprocess_kwargs, {}, {} def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ): return self.tokenizer(UpperCAmelCase_ , text_pair=UpperCAmelCase_ , return_tensors=self.framework ) def a_ ( self , SCREAMING_SNAKE_CASE_ ): return self.model(**UpperCAmelCase_ ) def a_ ( self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase : Tuple = model_outputs.logits[0].numpy() UpperCamelCase : Any = softmax(UpperCAmelCase_ ) UpperCamelCase : List[str] = np.argmax(UpperCAmelCase_ ) UpperCamelCase : Optional[Any] = self.model.config.idalabel[best_class] UpperCamelCase : str = probabilities[best_class].item() UpperCamelCase : List[str] = logits.tolist() return {"label": label, "score": score, "logits": logits}
499
import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy __A = logging.getLogger(__name__) def lowerCAmelCase_ ( __a , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = False , ) -> str: """simple docstring""" lowerCamelCase__: int =bnb_quantization_config.load_in_abit lowerCamelCase__: Any =bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( "You have a version of `bitsandbytes` that is not compatible with 8bit quantization," " make sure you have the latest version of `bitsandbytes` installed." ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( "You have a version of `bitsandbytes` that is not compatible with 4bit quantization," "make sure you have the latest version of `bitsandbytes` installed." ) lowerCamelCase__: List[Any] =[] # custom device map if isinstance(__a , __a ) and len(device_map.keys() ) > 1: lowerCamelCase__: Optional[int] =[key for key, value in device_map.items() if value in ["disk", "cpu"]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: lowerCamelCase__: Any =get_keys_to_not_convert(__a ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(__a ) lowerCamelCase__: List[str] =bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: lowerCamelCase__: List[Any] =[] lowerCamelCase__: int =bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(__a ) # compatibility with peft lowerCamelCase__: List[str] =load_in_abit lowerCamelCase__: int =load_in_abit lowerCamelCase__: Tuple =get_parameter_device(__a ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( "It is not recommended to quantize a loaded model. " "The model should be instantiated under the `init_empty_weights` context manager." ) lowerCamelCase__: Tuple =replace_with_bnb_layers(__a , __a , modules_to_not_convert=__a ) # convert param to the right dtype lowerCamelCase__: Dict =bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: lowerCamelCase__: str =name.replace(".weight" , "" ).replace(".bias" , "" ) lowerCamelCase__: Optional[Any] =getattr(__a , __a , __a ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(__a ): param.to(__a ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError("No GPU found. A GPU is needed for quantization." ) logger.info( F"""The model device type is {model_device.type}. However, cuda is needed for quantization.""" "We move the model to cuda." ) return model elif weights_location is None: raise RuntimeError( F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ ) else: with init_empty_weights(): lowerCamelCase__: str =replace_with_bnb_layers( __a , __a , modules_to_not_convert=__a ) lowerCamelCase__: Optional[Any] =get_quantized_model_device_map( __a , __a , __a , max_memory=__a , no_split_module_classes=__a , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): lowerCamelCase__: Any =True lowerCamelCase__: List[str] =any(x in list(device_map.values() ) for x in ["cpu", "disk"] ) load_checkpoint_in_model( __a , __a , __a , dtype=bnb_quantization_config.torch_dtype , offload_folder=__a , offload_state_dict=__a , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(__a , device_map=__a , offload_dir=__a ) def lowerCAmelCase_ ( __a , __a , __a=None , __a=None , __a=None ) -> str: """simple docstring""" if device_map is None: if torch.cuda.is_available(): lowerCamelCase__: str ={"": torch.cuda.current_device()} else: raise RuntimeError("No GPU found. A GPU is needed for quantization." ) logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`." ) if isinstance(__a , __a ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or " "'sequential'." ) lowerCamelCase__: Optional[int] ={} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) lowerCamelCase__: Optional[Any] ={} lowerCamelCase__: str =special_dtypes lowerCamelCase__: List[str] =no_split_module_classes lowerCamelCase__: Dict =bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": lowerCamelCase__: Optional[Any] =get_balanced_memory( __a , low_zero=(device_map == "balanced_low_0") , max_memory=__a , **__a , ) lowerCamelCase__: Union[str, Any] =max_memory lowerCamelCase__: Dict =infer_auto_device_map(__a , **__a ) if isinstance(__a , __a ): # check if don't have any quantized module on the cpu lowerCamelCase__: Union[str, Any] =bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules lowerCamelCase__: List[Any] ={ key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( "\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n " ) else: logger.info( "Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit" ) del device_map_without_some_modules return device_map def lowerCAmelCase_ ( __a , __a , __a=None , __a=None ) -> Optional[Any]: """simple docstring""" if modules_to_not_convert is None: lowerCamelCase__: List[Any] =[] lowerCamelCase__ , lowerCamelCase__: Any =_replace_with_bnb_layers( __a , __a , __a , __a ) if not has_been_replaced: logger.warning( "You are loading your model in 8bit or 4bit but no linear modules were found in your model." " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers." " Please double check your model architecture, or submit an issue on github if you think this is" " a bug." ) return model def lowerCAmelCase_ ( __a , __a , __a=None , __a=None , ) -> List[Any]: """simple docstring""" lowerCamelCase__: Optional[int] =False for name, module in model.named_children(): if current_key_name is None: lowerCamelCase__: Optional[Any] =[] current_key_name.append(__a ) if isinstance(__a , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` lowerCamelCase__: List[str] =".".join(__a ) lowerCamelCase__: Optional[Any] =True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: lowerCamelCase__: int =False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: lowerCamelCase__: Optional[int] =bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=__a , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: lowerCamelCase__: Dict =bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError("load_in_8bit and load_in_4bit can't be both False" ) lowerCamelCase__: Dict =module.weight.data if module.bias is not None: lowerCamelCase__: List[Any] =module.bias.data bnb_module.requires_grad_(__a ) setattr(__a , __a , __a ) lowerCamelCase__: int =True if len(list(module.children() ) ) > 0: lowerCamelCase__ , lowerCamelCase__: List[str] =_replace_with_bnb_layers( __a , __a , __a , __a ) lowerCamelCase__: Union[str, Any] =has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def lowerCAmelCase_ ( __a ) -> List[Any]: """simple docstring""" with init_empty_weights(): lowerCamelCase__: Any =deepcopy(__a ) # this has 0 cost since it is done inside `init_empty_weights` context manager` lowerCamelCase__: str =find_tied_parameters(__a ) # For compatibility with Accelerate < 0.18 if isinstance(__a , __a ): lowerCamelCase__: int =sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: lowerCamelCase__: str =sum(__a , [] ) lowerCamelCase__: str =len(__a ) > 0 # Check if it is a base model lowerCamelCase__: Optional[Any] =False if hasattr(__a , "base_model_prefix" ): lowerCamelCase__: Union[str, Any] =not hasattr(__a , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head lowerCamelCase__: Optional[int] =list(model.named_children() ) lowerCamelCase__: Optional[int] =[list_modules[-1][0]] # add last module together with tied weights lowerCamelCase__: Union[str, Any] =set(__a ) - set(__a ) lowerCamelCase__: List[str] =list(set(__a ) ) + list(__a ) # remove ".weight" from the keys lowerCamelCase__: List[Any] =[".weight", ".bias"] lowerCamelCase__: Tuple =[] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: lowerCamelCase__: Optional[Any] =name.replace(__a , "" ) filtered_module_names.append(__a ) return filtered_module_names def lowerCAmelCase_ ( __a ) -> Tuple: """simple docstring""" for m in model.modules(): if isinstance(__a , bnb.nn.Linearabit ): return True return False def lowerCAmelCase_ ( __a ) -> List[str]: """simple docstring""" return next(parameter.parameters() ).device def lowerCAmelCase_ ( __a , __a , __a , __a , __a , __a , __a ) -> Any: """simple docstring""" if fpaa_statistics is None: set_module_tensor_to_device(__a , __a , 0 , dtype=__a , value=__a ) lowerCamelCase__: Dict =param_name lowerCamelCase__: Tuple =model if "." in tensor_name: lowerCamelCase__: Any =tensor_name.split("." ) for split in splits[:-1]: lowerCamelCase__: Any =getattr(__a , __a ) if new_module is None: raise ValueError(F"""{module} has no attribute {split}.""" ) lowerCamelCase__: str =new_module lowerCamelCase__: int =splits[-1] # offload weights lowerCamelCase__: str =False offload_weight(module._parameters[tensor_name] , __a , __a , index=__a ) if hasattr(module._parameters[tensor_name] , "SCB" ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace("weight" , "SCB" ) , __a , index=__a , ) else: offload_weight(__a , __a , __a , index=__a ) offload_weight(__a , param_name.replace("weight" , "SCB" ) , __a , index=__a ) set_module_tensor_to_device(__a , __a , "meta" , dtype=__a , value=torch.empty(*param.size() ) )
59
0
"""simple docstring""" def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int = 3 , _UpperCAmelCase : List[Any] = 7 , _UpperCAmelCase : Dict = 100_0000 ): lowerCAmelCase = 0 lowerCAmelCase = 1 for current_denominator in range(1 , limit + 1 ): lowerCAmelCase = current_denominator * numerator // denominator if current_denominator % denominator == 0: current_numerator -= 1 if current_numerator * max_denominator > current_denominator * max_numerator: lowerCAmelCase = current_numerator lowerCAmelCase = current_denominator return max_numerator if __name__ == "__main__": print(solution(numerator=3, denominator=7, limit=100_0000))
4
from __future__ import annotations from math import pi def lowerCAmelCase_ ( __a , __a , __a ) -> dict[str, float]: """simple docstring""" if (inductance, frequency, reactance).count(0 ) != 1: raise ValueError("One and only one argument must be 0" ) if inductance < 0: raise ValueError("Inductance cannot be negative" ) if frequency < 0: raise ValueError("Frequency cannot be negative" ) if reactance < 0: raise ValueError("Inductive reactance cannot be negative" ) if inductance == 0: return {"inductance": reactance / (2 * pi * frequency)} elif frequency == 0: return {"frequency": reactance / (2 * pi * inductance)} elif reactance == 0: return {"reactance": 2 * pi * frequency * inductance} else: raise ValueError("Exactly one argument must be 0" ) if __name__ == "__main__": import doctest doctest.testmod()
59
0
import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, ) from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _SCREAMING_SNAKE_CASE : def __init__( self , lowercase , lowercase=2 , lowercase=3 , lowercase=4 , lowercase=2 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=36 , lowercase=3 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.0_2 , lowercase=6 , lowercase=6 , lowercase=3 , lowercase=4 , lowercase=None , lowercase=1000 , ) -> Optional[int]: lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = num_channels lowerCamelCase_ = image_size lowerCamelCase_ = patch_size lowerCamelCase_ = text_seq_length lowerCamelCase_ = is_training lowerCamelCase_ = use_input_mask lowerCamelCase_ = use_token_type_ids lowerCamelCase_ = use_labels lowerCamelCase_ = vocab_size lowerCamelCase_ = hidden_size lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = intermediate_size lowerCamelCase_ = hidden_act lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = max_position_embeddings lowerCamelCase_ = type_vocab_size lowerCamelCase_ = type_sequence_label_size lowerCamelCase_ = initializer_range lowerCamelCase_ = coordinate_size lowerCamelCase_ = shape_size lowerCamelCase_ = num_labels lowerCamelCase_ = num_choices lowerCamelCase_ = scope lowerCamelCase_ = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) lowerCamelCase_ = text_seq_length lowerCamelCase_ = (image_size // patch_size) ** 2 + 1 lowerCamelCase_ = self.text_seq_length + self.image_seq_length def SCREAMING_SNAKE_CASE_( self ) -> List[str]: lowerCamelCase_ = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) lowerCamelCase_ = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: lowerCamelCase_ = bbox[i, j, 3] lowerCamelCase_ = bbox[i, j, 1] lowerCamelCase_ = t if bbox[i, j, 2] < bbox[i, j, 0]: lowerCamelCase_ = bbox[i, j, 2] lowerCamelCase_ = bbox[i, j, 0] lowerCamelCase_ = t lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase_ = None if self.use_input_mask: lowerCamelCase_ = random_attention_mask([self.batch_size, self.text_seq_length] ) lowerCamelCase_ = None if self.use_token_type_ids: lowerCamelCase_ = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) lowerCamelCase_ = None lowerCamelCase_ = None if self.use_labels: lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) lowerCamelCase_ = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[str]: lowerCamelCase_ = LayoutLMvaModel(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() # text + image lowerCamelCase_ = model(UpperCAmelCase_ , pixel_values=UpperCAmelCase_ ) lowerCamelCase_ = model( UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ ) lowerCamelCase_ = model(UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ ) lowerCamelCase_ = model(UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only lowerCamelCase_ = model(UpperCAmelCase_ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only lowerCamelCase_ = model(pixel_values=UpperCAmelCase_ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[str]: lowerCamelCase_ = self.num_labels lowerCamelCase_ = LayoutLMvaForSequenceClassification(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() lowerCamelCase_ = model( UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]: lowerCamelCase_ = self.num_labels lowerCamelCase_ = LayoutLMvaForTokenClassification(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() lowerCamelCase_ = model( UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Tuple: lowerCamelCase_ = LayoutLMvaForQuestionAnswering(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() lowerCamelCase_ = model( UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE_( self ) -> List[Any]: lowerCamelCase_ = self.prepare_config_and_inputs() ( lowerCamelCase_ ) = config_and_inputs lowerCamelCase_ = { "input_ids": input_ids, "bbox": bbox, "pixel_values": pixel_values, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = ( ( LayoutLMvaModel, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaForQuestionAnswering, ) if is_torch_available() else () ) lowerCAmelCase__ = ( {'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel} if is_torch_available() else {} ) def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Tuple: return True def SCREAMING_SNAKE_CASE_( self ) -> Dict: lowerCamelCase_ = LayoutLMvaModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 ) def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase=False ) -> Dict: lowerCamelCase_ = copy.deepcopy(UpperCAmelCase_ ) if model_class in get_values(UpperCAmelCase_ ): lowerCamelCase_ = { k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous() if isinstance(UpperCAmelCase_ , torch.Tensor ) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(UpperCAmelCase_ ): lowerCamelCase_ = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_ ) elif model_class in get_values(UpperCAmelCase_ ): lowerCamelCase_ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_ ) lowerCamelCase_ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_ ) elif model_class in [ *get_values(UpperCAmelCase_ ), ]: lowerCamelCase_ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_ ) elif model_class in [ *get_values(UpperCAmelCase_ ), ]: lowerCamelCase_ = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=UpperCAmelCase_ , ) return inputs_dict def SCREAMING_SNAKE_CASE_( self ) -> Dict: self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE_( self ) -> Dict: lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase_ ) def SCREAMING_SNAKE_CASE_( self ) -> Dict: lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCamelCase_ = type self.model_tester.create_and_check_model(*UpperCAmelCase_ ) def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]: lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase_ ) def SCREAMING_SNAKE_CASE_( self ) -> Tuple: lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ ) def SCREAMING_SNAKE_CASE_( self ) -> List[Any]: lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ ) @slow def SCREAMING_SNAKE_CASE_( self ) -> str: for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = LayoutLMvaModel.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) def lowerCamelCase_ ( ): lowerCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]: return LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase_ ) if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]: lowerCamelCase_ = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(UpperCAmelCase_ ) lowerCamelCase_ = self.default_image_processor lowerCamelCase_ = prepare_img() lowerCamelCase_ = image_processor(images=UpperCAmelCase_ , return_tensors="pt" ).pixel_values.to(UpperCAmelCase_ ) lowerCamelCase_ = torch.tensor([[1, 2]] ) lowerCamelCase_ = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 ) # forward pass lowerCamelCase_ = model( input_ids=input_ids.to(UpperCAmelCase_ ) , bbox=bbox.to(UpperCAmelCase_ ) , pixel_values=pixel_values.to(UpperCAmelCase_ ) , ) # verify the logits lowerCamelCase_ = torch.Size((1, 199, 768) ) self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase_ ) lowerCamelCase_ = torch.tensor( [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(UpperCAmelCase_ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase_ , atol=1e-4 ) )
463
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCAmelCase_ ( __a , __a ) -> List[Any]: """simple docstring""" assert isinstance(__a , __a ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowerCAmelCase_ ( __a , __a , __a ) -> Any: """simple docstring""" lowerCamelCase__: Any =tmp_path / "cache" lowerCamelCase__: Optional[int] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCamelCase__: Tuple =ParquetDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read() _check_parquet_dataset(__a , __a ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowerCAmelCase_ ( __a , __a , __a ) -> List[str]: """simple docstring""" lowerCamelCase__: int =tmp_path / "cache" lowerCamelCase__: Tuple ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: Union[str, Any] =features.copy() if features else default_expected_features lowerCamelCase__: Optional[int] =( Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCamelCase__: int =ParquetDatasetReader(__a , features=__a , cache_dir=__a ).read() _check_parquet_dataset(__a , __a ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowerCAmelCase_ ( __a , __a , __a ) -> Any: """simple docstring""" lowerCamelCase__: Any =tmp_path / "cache" lowerCamelCase__: Optional[Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: Optional[Any] =ParquetDatasetReader(__a , cache_dir=__a , split=__a ).read() _check_parquet_dataset(__a , __a ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list] ) def lowerCAmelCase_ ( __a , __a , __a ) -> int: """simple docstring""" if issubclass(__a , __a ): lowerCamelCase__: List[Any] =parquet_path elif issubclass(__a , __a ): lowerCamelCase__: str =[parquet_path] lowerCamelCase__: Tuple =tmp_path / "cache" lowerCamelCase__: Optional[Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: int =ParquetDatasetReader(__a , cache_dir=__a ).read() _check_parquet_dataset(__a , __a ) def lowerCAmelCase_ ( __a , __a , __a=("train",) ) -> Dict: """simple docstring""" assert isinstance(__a , __a ) for split in splits: lowerCamelCase__: Tuple =dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowerCAmelCase_ ( __a , __a , __a ) -> Any: """simple docstring""" lowerCamelCase__: List[Any] =tmp_path / "cache" lowerCamelCase__: Optional[Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCamelCase__: Tuple =ParquetDatasetReader( {"train": parquet_path} , cache_dir=__a , keep_in_memory=__a ).read() _check_parquet_datasetdict(__a , __a ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowerCAmelCase_ ( __a , __a , __a ) -> Optional[Any]: """simple docstring""" lowerCamelCase__: Tuple =tmp_path / "cache" lowerCamelCase__: Optional[int] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: List[Any] =features.copy() if features else default_expected_features lowerCamelCase__: int =( Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCamelCase__: Optional[Any] =ParquetDatasetReader({"train": parquet_path} , features=__a , cache_dir=__a ).read() _check_parquet_datasetdict(__a , __a ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowerCAmelCase_ ( __a , __a , __a ) -> Union[str, Any]: """simple docstring""" if split: lowerCamelCase__: Any ={split: parquet_path} else: lowerCamelCase__: int ="train" lowerCamelCase__: Any ={"train": parquet_path, "test": parquet_path} lowerCamelCase__: str =tmp_path / "cache" lowerCamelCase__: Any ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: int =ParquetDatasetReader(__a , cache_dir=__a ).read() _check_parquet_datasetdict(__a , __a , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCAmelCase_ ( __a , __a ) -> int: """simple docstring""" lowerCamelCase__: List[str] =ParquetDatasetWriter(__a , tmp_path / "foo.parquet" ) assert writer.write() > 0 lowerCamelCase__: List[str] =pq.ParquetFile(tmp_path / "foo.parquet" ) lowerCamelCase__: List[str] =pf.read() assert dataset.data.table == output_table def lowerCAmelCase_ ( __a , __a ) -> List[str]: """simple docstring""" lowerCamelCase__: List[str] =str(shared_datadir / "test_image_rgb.jpg" ) lowerCamelCase__: Union[str, Any] ={"image": [image_path]} lowerCamelCase__: Optional[Any] =Features({"image": Image()} ) lowerCamelCase__: Optional[int] =Dataset.from_dict(__a , features=__a ) lowerCamelCase__: Optional[int] =ParquetDatasetWriter(__a , tmp_path / "foo.parquet" ) assert writer.write() > 0 lowerCamelCase__: Dict =Dataset.from_parquet(str(tmp_path / "foo.parquet" ) ) assert dataset.features == reloaded_dataset.features lowerCamelCase__: Optional[Any] =ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=__a ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( "feature, expected" , [ (Features({"foo": Value("int32" )} ), None), (Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def lowerCAmelCase_ ( __a , __a ) -> Optional[Any]: """simple docstring""" assert get_writer_batch_size(__a ) == expected
59
0
"""simple docstring""" import warnings from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch from ...models import UNetaDModel from ...schedulers import RePaintScheduler from ...utils import PIL_INTERPOLATION, logging, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput lowerCAmelCase: List[str] =logging.get_logger(__name__) # pylint: disable=invalid-name def __snake_case ( __A ) -> List[Any]: warnings.warn( """The preprocess method is deprecated and will be removed in a future version. Please""" """ use VaeImageProcessor.preprocess instead""" ,__a ,) if isinstance(__a ,torch.Tensor ): return image elif isinstance(__a ,PIL.Image.Image ): lowercase : Dict = [image] if isinstance(image[0] ,PIL.Image.Image ): lowercase : Tuple = image[0].size lowercase : Optional[Any] = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 lowercase : Optional[int] = [np.array(i.resize((w, h) ,resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image] lowercase : Tuple = np.concatenate(__a ,axis=0 ) lowercase : int = np.array(__a ).astype(np.floataa ) / 255.0 lowercase : int = image.transpose(0 ,3 ,1 ,2 ) lowercase : Optional[int] = 2.0 * image - 1.0 lowercase : Tuple = torch.from_numpy(__a ) elif isinstance(image[0] ,torch.Tensor ): lowercase : List[str] = torch.cat(__a ,dim=0 ) return image def __snake_case ( __A ) -> Any: if isinstance(__a ,torch.Tensor ): return mask elif isinstance(__a ,PIL.Image.Image ): lowercase : Dict = [mask] if isinstance(mask[0] ,PIL.Image.Image ): lowercase : Union[str, Any] = mask[0].size lowercase : Optional[int] = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 lowercase : Union[str, Any] = [np.array(m.convert("""L""" ).resize((w, h) ,resample=PIL_INTERPOLATION["""nearest"""] ) )[None, :] for m in mask] lowercase : int = np.concatenate(__a ,axis=0 ) lowercase : str = mask.astype(np.floataa ) / 255.0 lowercase : int = 0 lowercase : Optional[Any] = 1 lowercase : Tuple = torch.from_numpy(__a ) elif isinstance(mask[0] ,torch.Tensor ): lowercase : Dict = torch.cat(__a ,dim=0 ) return mask class lowerCamelCase__ ( __SCREAMING_SNAKE_CASE ): __UpperCAmelCase = 42 __UpperCAmelCase = 42 def __init__( self , snake_case , snake_case ) -> int: """simple docstring""" super().__init__() self.register_modules(unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ ) @torch.no_grad() def __call__( self , snake_case , snake_case , snake_case = 2_5_0 , snake_case = 0.0 , snake_case = 1_0 , snake_case = 1_0 , snake_case = None , snake_case = "pil" , snake_case = True , ) -> Union[ImagePipelineOutput, Tuple]: """simple docstring""" lowercase : Tuple = image lowercase : List[str] = _preprocess_image(UpperCAmelCase_ ) lowercase : Union[str, Any] = original_image.to(device=self.device , dtype=self.unet.dtype ) lowercase : Optional[int] = _preprocess_mask(UpperCAmelCase_ ) lowercase : str = mask_image.to(device=self.device , dtype=self.unet.dtype ) lowercase : Optional[Any] = original_image.shape[0] # sample gaussian noise to begin the loop if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) and len(UpperCAmelCase_ ) != batch_size: raise ValueError( f'''You have passed a list of generators of length {len(UpperCAmelCase_ )}, but requested an effective batch''' f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) lowercase : int = original_image.shape lowercase : Tuple = randn_tensor(UpperCAmelCase_ , generator=UpperCAmelCase_ , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , self.device ) lowercase : List[str] = eta lowercase : Optional[int] = self.scheduler.timesteps[0] + 1 lowercase : Union[str, Any] = generator[0] if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else generator for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): if t < t_last: # predict the noise residual lowercase : Union[str, Any] = self.unet(UpperCAmelCase_ , UpperCAmelCase_ ).sample # compute previous image: x_t -> x_t-1 lowercase : Optional[Any] = self.scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ).prev_sample else: # compute the reverse: x_t-1 -> x_t lowercase : Optional[Any] = self.scheduler.undo_step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) lowercase : List[Any] = t lowercase : Any = (image / 2 + 0.5).clamp(0 , 1 ) lowercase : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowercase : int = self.numpy_to_pil(UpperCAmelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCAmelCase_ )
607
import unittest from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __A = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' lowercase_ = XLMProphetNetTokenizer lowercase_ = False lowercase_ = True def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Optional[Any]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase__: Any =XLMProphetNetTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_) tokenizer.save_pretrained(self.tmpdirname) def SCREAMING_SNAKE_CASE_ (self : str) ->str: '''simple docstring''' lowerCamelCase__: List[Any] ="[PAD]" lowerCamelCase__: Tuple =0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_) , UpperCAmelCase_) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_) , UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Dict) ->int: '''simple docstring''' lowerCamelCase__: List[Any] =list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , "[PAD]") self.assertEqual(vocab_keys[1] , "[CLS]") self.assertEqual(vocab_keys[-1] , "j") self.assertEqual(len(UpperCAmelCase_) , 1_012) def SCREAMING_SNAKE_CASE_ (self : Dict) ->Union[str, Any]: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_012) def SCREAMING_SNAKE_CASE_ (self : Any) ->Optional[int]: '''simple docstring''' lowerCamelCase__: Optional[Any] =XLMProphetNetTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_) lowerCamelCase__: Tuple =tokenizer.tokenize("This is a test") self.assertListEqual(UpperCAmelCase_ , ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) lowerCamelCase__: Optional[Any] =tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( UpperCAmelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) lowerCamelCase__: Any =tokenizer.convert_tokens_to_ids(UpperCAmelCase_) self.assertListEqual( UpperCAmelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4] ] , ) lowerCamelCase__: Any =tokenizer.convert_ids_to_tokens(UpperCAmelCase_) self.assertListEqual( UpperCAmelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "[UNK]", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "[UNK]", ".", ] , ) @cached_property def SCREAMING_SNAKE_CASE_ (self : Any) ->int: '''simple docstring''' return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased") @slow def SCREAMING_SNAKE_CASE_ (self : List[str]) ->List[str]: '''simple docstring''' lowerCamelCase__: Optional[int] ="Hello World!" lowerCamelCase__: Dict =[35_389, 6_672, 49, 2] self.assertListEqual(UpperCAmelCase_ , self.big_tokenizer.encode(UpperCAmelCase_)) @slow def SCREAMING_SNAKE_CASE_ (self : int) ->Union[str, Any]: '''simple docstring''' lowerCamelCase__: Any ={"input_ids": [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase_ , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
59
0
import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) A_ = "\\n Text data.\n Second line of data." A_ = "file" @pytest.fixture(scope='''session''' ) def UpperCAmelCase ( UpperCAmelCase )-> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + ".zstd") SCREAMING_SNAKE_CASE_ = bytes(__a ,'''utf-8''' ) with zstd.open(__a ,'''wb''' ) as f: f.write(__a ) return path @pytest.fixture def UpperCAmelCase ( UpperCAmelCase )-> Union[str, Any]: '''simple docstring''' with open(os.path.join(tmpfs.local_root_dir ,__a ) ,'''w''' ) as f: f.write(__a ) return FILE_PATH @pytest.mark.parametrize('''compression_format''' ,['''gzip''', '''xz''', '''zstd'''] ) def UpperCAmelCase ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase )-> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE_ = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path} SCREAMING_SNAKE_CASE_ = input_paths[compression_format] SCREAMING_SNAKE_CASE_ = tmp_path / "cache" SCREAMING_SNAKE_CASE_ = DownloadConfig(cache_dir=__a ,extract_compressed_file=__a ) SCREAMING_SNAKE_CASE_ = cached_path(__a ,download_config=__a ) with open(__a ) as f: SCREAMING_SNAKE_CASE_ = f.read() with open(__a ) as f: SCREAMING_SNAKE_CASE_ = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize('''default_extracted''' ,[True, False] ) @pytest.mark.parametrize('''default_cache_dir''' ,[True, False] ) def UpperCAmelCase ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase )-> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE_ = "custom_cache" SCREAMING_SNAKE_CASE_ = "custom_extracted_dir" SCREAMING_SNAKE_CASE_ = tmp_path / "custom_extracted_path" if default_extracted: SCREAMING_SNAKE_CASE_ = ("downloads" if default_cache_dir else custom_cache_dir, "extracted") else: monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' ,__a ) monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' ,str(__a ) ) SCREAMING_SNAKE_CASE_ = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) SCREAMING_SNAKE_CASE_ = xz_file SCREAMING_SNAKE_CASE_ = ( DownloadConfig(extract_compressed_file=__a ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir ,extract_compressed_file=__a ) ) SCREAMING_SNAKE_CASE_ = cached_path(__a ,download_config=__a ) assert Path(__a ).parent.parts[-2:] == expected def UpperCAmelCase ( UpperCAmelCase )-> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE_ = str(Path(__a ).resolve() ) assert cached_path(__a ) == text_file # relative path SCREAMING_SNAKE_CASE_ = str(Path(__a ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(__a ) == text_file def UpperCAmelCase ( UpperCAmelCase )-> str: '''simple docstring''' SCREAMING_SNAKE_CASE_ = str(tmp_path.resolve() / '''__missing_file__.txt''' ) with pytest.raises(__a ): cached_path(__a ) # relative path SCREAMING_SNAKE_CASE_ = "./__missing_file__.txt" with pytest.raises(__a ): cached_path(__a ) def UpperCAmelCase ( UpperCAmelCase )-> str: '''simple docstring''' SCREAMING_SNAKE_CASE_ = get_from_cache(f'''tmp://{tmpfs_file}''' ) with open(__a ) as f: SCREAMING_SNAKE_CASE_ = f.read() assert output_file_content == FILE_CONTENT @patch('''datasets.config.HF_DATASETS_OFFLINE''' ,__a ) def UpperCAmelCase ( )-> Union[str, Any]: '''simple docstring''' with pytest.raises(__a ): cached_path('''https://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' ,__a ) def UpperCAmelCase ( UpperCAmelCase )-> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('''data''' ) / "file.html" with pytest.raises(__a ): http_get('''https://huggingface.co''' ,temp_file=__a ) with pytest.raises(__a ): http_head('''https://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' ,__a ) def UpperCAmelCase ( UpperCAmelCase )-> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('''data''' ) / "file.html" with pytest.raises(__a ): ftp_get('''ftp://huggingface.co''' ,temp_file=__a ) with pytest.raises(__a ): ftp_head('''ftp://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' ,__a ) def UpperCAmelCase ( UpperCAmelCase )-> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('''data''' ) / "file.html" with pytest.raises(__a ): fsspec_get('''s3://huggingface.co''' ,temp_file=__a ) with pytest.raises(__a ): fsspec_head('''s3://huggingface.co''' )
393
import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE_ (self : List[str]) ->str: '''simple docstring''' lowerCamelCase__: Union[str, Any] ="ylacombe/bark-small" lowerCamelCase__: Tuple =tempfile.mkdtemp() lowerCamelCase__: Tuple ="en_speaker_1" lowerCamelCase__: Optional[int] ="This is a test string" lowerCamelCase__: List[str] ="speaker_embeddings_path.json" lowerCamelCase__: int ="speaker_embeddings" def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , **UpperCAmelCase_ : Any) ->Tuple: '''simple docstring''' return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Union[str, Any]: '''simple docstring''' shutil.rmtree(self.tmpdirname) def SCREAMING_SNAKE_CASE_ (self : int) ->Any: '''simple docstring''' lowerCamelCase__: List[Any] =self.get_tokenizer() lowerCamelCase__: List[str] =BarkProcessor(tokenizer=UpperCAmelCase_) processor.save_pretrained(self.tmpdirname) lowerCamelCase__: Dict =BarkProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab()) @slow def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Tuple: '''simple docstring''' lowerCamelCase__: Tuple =BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) lowerCamelCase__: Dict =self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)") lowerCamelCase__: Any =BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->int: '''simple docstring''' lowerCamelCase__: Any =BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) lowerCamelCase__: List[str] =35 lowerCamelCase__: Optional[Any] =2 lowerCamelCase__: Optional[Any] =8 lowerCamelCase__: Optional[int] ={ "semantic_prompt": np.ones(UpperCAmelCase_), "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)), "fine_prompt": np.ones((nb_codebooks_total, seq_len)), } # test providing already loaded voice_preset lowerCamelCase__: Any =processor(text=self.input_string , voice_preset=UpperCAmelCase_) lowerCamelCase__: int =inputs["history_prompt"] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([])).tolist()) # test loading voice preset from npz file lowerCamelCase__: Union[str, Any] =os.path.join(self.tmpdirname , "file.npz") np.savez(UpperCAmelCase_ , **UpperCAmelCase_) lowerCamelCase__: Tuple =processor(text=self.input_string , voice_preset=UpperCAmelCase_) lowerCamelCase__: Optional[Any] =inputs["history_prompt"] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([])).tolist()) # test loading voice preset from the hub lowerCamelCase__: Any =processor(text=self.input_string , voice_preset=self.voice_preset) def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Union[str, Any]: '''simple docstring''' lowerCamelCase__: str =self.get_tokenizer() lowerCamelCase__: Dict =BarkProcessor(tokenizer=UpperCAmelCase_) lowerCamelCase__: List[Any] =processor(text=self.input_string) lowerCamelCase__: Optional[int] =tokenizer( self.input_string , padding="max_length" , max_length=256 , add_special_tokens=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist())
59
0
'''simple docstring''' import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE = logging.get_logger(__name__) def lowercase_ ( __A : Any , __A : List[Any] , __A : Optional[int] , __A : Union[str, Any] ) -> Dict: """simple docstring""" lowercase : Optional[Any] =original_name.split('''.''' )[0] lowercase : Any =key.split('''.''' ) lowercase : Optional[Any] =int(key_list[key_list.index(__a ) - 2] ) lowercase : List[str] =int(key_list[key_list.index(__a ) - 1] ) lowercase : Union[str, Any] =orig_block_num - offset lowercase : List[str] =key.replace(F'{orig_block_num}.{layer_num}.{original_name}' , F'block.{new_block_num}.{layer_num}.{new_name}' ) return key def lowercase_ ( __A : Dict ) -> List[str]: """simple docstring""" lowercase : Union[str, Any] =OrderedDict() lowercase : int =0, 0 for key, value in state_dict.items(): if key.startswith('''network''' ): lowercase : Union[str, Any] =key.replace('''network''' , '''poolformer.encoder''' ) if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith('''bias''' ) and "patch_embed" not in key: patch_emb_offset += 1 lowercase : List[Any] =key[: key.find('''proj''' )] lowercase : Optional[Any] =key.replace(__a , F'patch_embeddings.{total_embed_found}.' ) lowercase : List[str] =key.replace('''proj''' , '''projection''' ) if key.endswith('''bias''' ): total_embed_found += 1 if "patch_embeddings" in key: lowercase : Tuple ="poolformer.encoder." + key if "mlp.fc1" in key: lowercase : Union[str, Any] =replace_key_with_offset(__a , __a , '''mlp.fc1''' , '''output.conv1''' ) if "mlp.fc2" in key: lowercase : Optional[int] =replace_key_with_offset(__a , __a , '''mlp.fc2''' , '''output.conv2''' ) if "norm1" in key: lowercase : Union[str, Any] =replace_key_with_offset(__a , __a , '''norm1''' , '''before_norm''' ) if "norm2" in key: lowercase : List[str] =replace_key_with_offset(__a , __a , '''norm2''' , '''after_norm''' ) if "layer_scale_1" in key: lowercase : str =replace_key_with_offset(__a , __a , '''layer_scale_1''' , '''layer_scale_1''' ) if "layer_scale_2" in key: lowercase : Any =replace_key_with_offset(__a , __a , '''layer_scale_2''' , '''layer_scale_2''' ) if "head" in key: lowercase : int =key.replace('''head''' , '''classifier''' ) lowercase : List[str] =value return new_state_dict def lowercase_ ( ) -> List[Any]: """simple docstring""" lowercase : Optional[int] ="http://images.cocodataset.org/val2017/000000039769.jpg" lowercase : Optional[int] =Image.open(requests.get(__a , stream=__a ).raw ) return image @torch.no_grad() def lowercase_ ( __A : str , __A : int , __A : List[Any] ) -> Any: """simple docstring""" lowercase : Any =PoolFormerConfig() # set attributes based on model_name lowercase : int ="huggingface/label-files" lowercase : Any =model_name[-3:] lowercase : int =1_0_0_0 lowercase : List[Any] ="imagenet-1k-id2label.json" lowercase : Any =(1, 1_0_0_0) # set config attributes lowercase : Optional[Any] =json.load(open(hf_hub_download(__a , __a , repo_type='''dataset''' ) , '''r''' ) ) lowercase : Dict ={int(__a ): v for k, v in idalabel.items()} lowercase : Optional[int] =idalabel lowercase : int ={v: k for k, v in idalabel.items()} if size == "s12": lowercase : Optional[int] =[2, 2, 6, 2] lowercase : List[Any] =[6_4, 1_2_8, 3_2_0, 5_1_2] lowercase : Optional[Any] =4.0 lowercase : int =0.9 elif size == "s24": lowercase : List[str] =[4, 4, 1_2, 4] lowercase : str =[6_4, 1_2_8, 3_2_0, 5_1_2] lowercase : Any =4.0 lowercase : str =0.9 elif size == "s36": lowercase : Any =[6, 6, 1_8, 6] lowercase : Optional[int] =[6_4, 1_2_8, 3_2_0, 5_1_2] lowercase : int =4.0 lowercase : Dict =1E-6 lowercase : Any =0.9 elif size == "m36": lowercase : Union[str, Any] =[6, 6, 1_8, 6] lowercase : Optional[Any] =[9_6, 1_9_2, 3_8_4, 7_6_8] lowercase : Tuple =4.0 lowercase : Union[str, Any] =1E-6 lowercase : Optional[int] =0.95 elif size == "m48": lowercase : Optional[Any] =[8, 8, 2_4, 8] lowercase : str =[9_6, 1_9_2, 3_8_4, 7_6_8] lowercase : Optional[int] =4.0 lowercase : Dict =1E-6 lowercase : Any =0.95 else: raise ValueError(F'Size {size} not supported' ) # load image processor lowercase : str =PoolFormerImageProcessor(crop_pct=__a ) # Prepare image lowercase : Optional[int] =prepare_img() lowercase : Optional[int] =image_processor(images=__a , return_tensors='''pt''' ).pixel_values logger.info(F'Converting model {model_name}...' ) # load original state dict lowercase : List[str] =torch.load(__a , map_location=torch.device('''cpu''' ) ) # rename keys lowercase : List[Any] =rename_keys(__a ) # create HuggingFace model and load state dict lowercase : List[str] =PoolFormerForImageClassification(__a ) model.load_state_dict(__a ) model.eval() # Define image processor lowercase : Optional[int] =PoolFormerImageProcessor(crop_pct=__a ) lowercase : Optional[int] =image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values # forward pass lowercase : List[Any] =model(__a ) lowercase : Any =outputs.logits # define expected logit slices for different models if size == "s12": lowercase : Optional[int] =torch.tensor([-0.3045, -0.6758, -0.4869] ) elif size == "s24": lowercase : Union[str, Any] =torch.tensor([0.4402, -0.1374, -0.8045] ) elif size == "s36": lowercase : Dict =torch.tensor([-0.6080, -0.5133, -0.5898] ) elif size == "m36": lowercase : Tuple =torch.tensor([0.3952, 0.2263, -1.2668] ) elif size == "m48": lowercase : Dict =torch.tensor([0.1167, -0.0656, -0.3423] ) else: raise ValueError(F'Size {size} not supported' ) # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3] , __a , atol=1E-2 ) # finally, save model and image processor logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' ) Path(__a ).mkdir(exist_ok=__a ) model.save_pretrained(__a ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(__a ) if __name__ == "__main__": SCREAMING_SNAKE_CASE = argparse.ArgumentParser() parser.add_argument( '--model_name', default='poolformer_s12', type=str, help='Name of the model you\'d like to convert.', ) parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) SCREAMING_SNAKE_CASE = parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
94
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = ["image_processor", "tokenizer"] lowercase_ = "CLIPImageProcessor" lowercase_ = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast") def __init__(self : List[Any] , UpperCAmelCase_ : int=None , UpperCAmelCase_ : List[Any]=None , **UpperCAmelCase_ : List[str]) ->Optional[int]: '''simple docstring''' lowerCamelCase__: Union[str, Any] =None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , UpperCAmelCase_ , ) lowerCamelCase__: int =kwargs.pop("feature_extractor") lowerCamelCase__: int =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") super().__init__(UpperCAmelCase_ , UpperCAmelCase_) def __call__(self : List[Any] , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : int=None , **UpperCAmelCase_ : Any) ->Union[str, Any]: '''simple docstring''' if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be none.") if text is not None: lowerCamelCase__: List[Any] =self.tokenizer(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_) if images is not None: lowerCamelCase__: int =self.image_processor(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_) if text is not None and images is not None: lowerCamelCase__: str =image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCAmelCase_) , tensor_type=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[str] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[Any]) ->Dict: '''simple docstring''' return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Optional[int] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Any) ->Optional[Any]: '''simple docstring''' return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_) @property def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Optional[int]: '''simple docstring''' lowerCamelCase__: Optional[Any] =self.tokenizer.model_input_names lowerCamelCase__: str =self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
59
0
"""simple docstring""" from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging A_ = logging.get_logger(__name__) A_ = { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''', } class lowercase( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase__ = "t5" lowercase__ = ["past_key_values"] lowercase__ = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"} def __init__( self: List[str], a_: int=32_128, a_: Dict=512, a_: List[Any]=64, a_: int=2_048, a_: Optional[Any]=6, a_: List[Any]=None, a_: Optional[int]=8, a_: Dict=32, a_: List[str]=128, a_: Dict=0.1, a_: int=1E-6, a_: Any=1.0, a_: Optional[Any]="relu", a_: Tuple=True, a_: Optional[Any]=True, a_: Any=0, a_: str=1, **a_: str, ): '''simple docstring''' _snake_case : int = vocab_size _snake_case : Optional[int] = d_model _snake_case : List[str] = d_kv _snake_case : Optional[Any] = d_ff _snake_case : Tuple = num_layers _snake_case : str = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry _snake_case : Optional[int] = num_heads _snake_case : Optional[Any] = relative_attention_num_buckets _snake_case : Optional[int] = relative_attention_max_distance _snake_case : Tuple = dropout_rate _snake_case : Union[str, Any] = layer_norm_epsilon _snake_case : str = initializer_factor _snake_case : List[str] = feed_forward_proj _snake_case : str = use_cache _snake_case : Dict = self.feed_forward_proj.split("""-""" ) _snake_case : Optional[int] = act_info[-1] _snake_case : Optional[Any] = act_info[0] == "gated" if len(UpperCAmelCase_ ) > 1 and act_info[0] != "gated" or len(UpperCAmelCase_ ) > 2: raise ValueError( f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer." """Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """ """'gated-gelu' or 'relu'""" ) # for backwards compatibility if feed_forward_proj == "gated-gelu": _snake_case : Union[str, Any] = "gelu_new" super().__init__( pad_token_id=UpperCAmelCase_, eos_token_id=UpperCAmelCase_, is_encoder_decoder=UpperCAmelCase_, **UpperCAmelCase_, ) class lowercase( __SCREAMING_SNAKE_CASE ): '''simple docstring''' @property def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Dict = { "input_ids": {0: "batch", 1: "encoder_sequence"}, "attention_mask": {0: "batch", 1: "encoder_sequence"}, } if self.use_past: _snake_case : Tuple = "past_encoder_sequence + sequence" _snake_case : str = {0: "batch"} _snake_case : Optional[Any] = {0: "batch", 1: "past_decoder_sequence + sequence"} else: _snake_case : Optional[int] = {0: "batch", 1: "decoder_sequence"} _snake_case : Optional[int] = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(UpperCAmelCase_, direction="""inputs""" ) return common_inputs @property def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' return 13
609
from datetime import datetime import matplotlib.pyplot as plt import torch def lowerCAmelCase_ ( __a ) -> Any: """simple docstring""" for param in module.parameters(): lowerCamelCase__: Tuple =False def lowerCAmelCase_ ( ) -> Optional[int]: """simple docstring""" lowerCamelCase__: List[str] ="cuda" if torch.cuda.is_available() else "cpu" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): lowerCamelCase__: str ="mps" if device == "mps": print( "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch" " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues" " with generations." ) return device def lowerCAmelCase_ ( __a ) -> List[str]: """simple docstring""" lowerCamelCase__: Union[str, Any] =plt.imshow(__a ) fig.axes.get_xaxis().set_visible(__a ) fig.axes.get_yaxis().set_visible(__a ) plt.show() def lowerCAmelCase_ ( ) -> Optional[Any]: """simple docstring""" lowerCamelCase__: List[str] =datetime.now() lowerCamelCase__: str =current_time.strftime("%H:%M:%S" ) return timestamp
59
0
import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def _snake_case ( lowerCAmelCase : Tuple=3_2 , lowerCAmelCase : Optional[Any]=1_0 , lowerCAmelCase : Optional[int]=1_0_0 , lowerCAmelCase : str=1_0_2_6 , lowerCAmelCase : List[str]=True , lowerCAmelCase : int="data/tokenized_stories_train_wikitext103.jbl" , lowerCAmelCase : Any="igf_context_pairs.jbl" , ): """simple docstring""" set_seed(3 ) # generate train_data and objective_set SCREAMING_SNAKE_CASE_ : Tuple = generate_datasets( __a , __a , number=__a , min_len=1_0_2_6 , trim=__a ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? SCREAMING_SNAKE_CASE_ : List[Any] = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" ) # load pretrained model SCREAMING_SNAKE_CASE_ : List[Any] = load_gpta("gpt2" ).to(__a ) print("computing perplexity on objective set" ) SCREAMING_SNAKE_CASE_ : Tuple = compute_perplexity(__a , __a , __a ).item() print("perplexity on objective set:" , __a ) # collect igf pairs and save to file demo.jbl collect_objective_set(__a , __a , __a , __a , __a , __a , __a , __a ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def _snake_case ( lowerCAmelCase : Any , lowerCAmelCase : List[Any]=1_5 , lowerCAmelCase : Any=1_2_8 , lowerCAmelCase : Dict=1_0_0 , lowerCAmelCase : List[str]="igf_model.pt" , ): """simple docstring""" set_seed(4_2 ) # Load pre-trained model SCREAMING_SNAKE_CASE_ : int = GPTaLMHeadModel.from_pretrained("gpt2" ) # Initialize secondary learner to use embedding weights of model SCREAMING_SNAKE_CASE_ : str = SecondaryLearner(__a ) # Train secondary learner SCREAMING_SNAKE_CASE_ : List[Any] = train_secondary_learner( __a , __a , max_epochs=__a , batch_size=__a , eval_freq=1_0_0 , igf_model_path=__a , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def _snake_case ( lowerCAmelCase : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any]=3_2 , lowerCAmelCase : Any=1_0_0_0 , lowerCAmelCase : Optional[Any]=1_6 , lowerCAmelCase : Dict=1.0 , lowerCAmelCase : Tuple=recopy_gpta , lowerCAmelCase : int=None , lowerCAmelCase : Optional[Any]=1_0 , lowerCAmelCase : Dict="gpt2_finetuned.pt" , ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" ) SCREAMING_SNAKE_CASE_ : List[str] = RandomSampler(__a ) SCREAMING_SNAKE_CASE_ : Any = DataLoader(__a , sampler=__a ) SCREAMING_SNAKE_CASE_ : List[Any] = max_steps // (len(__a )) + 1 SCREAMING_SNAKE_CASE_ : List[Any] = 0 SCREAMING_SNAKE_CASE_ : int = torch.zeros((1, context_len) , dtype=torch.long , device=__a ) SCREAMING_SNAKE_CASE_ : List[Any] = recopy_model(__a , __a , __a ) model.train() if secondary_learner is not None: secondary_learner.to(__a ) secondary_learner.eval() SCREAMING_SNAKE_CASE_ : int = [] SCREAMING_SNAKE_CASE_ : List[Any] = 0 SCREAMING_SNAKE_CASE_ : Union[str, Any] = [] SCREAMING_SNAKE_CASE_ : str = [] # Compute the performance of the transformer model at the beginning SCREAMING_SNAKE_CASE_ : int = compute_perplexity(__a , __a , __a ) test_perps.append(__a ) print("Test perplexity, step" , __a , ":" , __a ) for epoch in range(int(__a ) ): for step, example in enumerate(__a ): torch.cuda.empty_cache() SCREAMING_SNAKE_CASE_ : Union[str, Any] = random.randint(0 , example.size(2 ) - context_len - 1 ) SCREAMING_SNAKE_CASE_ : Tuple = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() SCREAMING_SNAKE_CASE_ : List[Any] = model(__a , labels=__a ) SCREAMING_SNAKE_CASE_ : Any = True if secondary_learner is not None: SCREAMING_SNAKE_CASE_ : Dict = secondary_learner.forward( torch.tensor(__a , dtype=torch.long , device=__a ).unsqueeze(0 ) )[0].item() observed_qs.append(float(__a ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 1_0: SCREAMING_SNAKE_CASE_ : Optional[Any] = -1 if predicted_q < threshold: SCREAMING_SNAKE_CASE_ : Tuple = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) SCREAMING_SNAKE_CASE_ : Any = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. if examples == batch_size: torch.cuda.empty_cache() SCREAMING_SNAKE_CASE_ : Optional[int] = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: SCREAMING_SNAKE_CASE_ : List[str] = compute_perplexity(__a , __a , __a ) test_perps.append(__a ) print("Test perplexity, step" , __a , ":" , __a ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 6_0: break if max_steps > 0 and global_step > 6_0: break # save finetuned transformer model torch.save(model.state_dict() , __a ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def _snake_case ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task" ) # Required parameters parser.add_argument( "--data_dir" , default=__a , type=__a , required=__a , help="The input data dir. Should contain data files for WikiText." , ) parser.add_argument( "--model_name_or_path" , default=__a , type=__a , required=__a , help="Path to pretrained model or model identifier from huggingface.co/models" , ) parser.add_argument( "--data_file" , type=__a , default=__a , help=( "A jbl file containing tokenized data which can be split as objective dataset, " "train_dataset and test_dataset." ) , ) parser.add_argument( "--igf_data_file" , type=__a , default=__a , help="A jbl file containing the context and information gain pairs to train secondary learner." , ) parser.add_argument( "--output_dir" , default=__a , type=__a , required=__a , help="The output directory where the final fine-tuned model is stored." , ) parser.add_argument( "--tokenizer_name" , default=__a , type=__a , help="Pretrained tokenizer name or path if not the same as model_name" , ) parser.add_argument("--seed" , type=__a , default=__a , help="A seed for reproducible training." ) parser.add_argument( "--context_len" , default=3_2 , type=__a , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) , ) parser.add_argument( "--size_objective_set" , default=1_0_0 , type=__a , help="number of articles that are long enough to be used as our objective set" , ) parser.add_argument( "--eval_freq" , default=1_0_0 , type=__a , help="secondary model evaluation is triggered at eval_freq" ) parser.add_argument("--max_steps" , default=1_0_0_0 , type=__a , help="To calculate training epochs" ) parser.add_argument( "--secondary_learner_batch_size" , default=1_2_8 , type=__a , help="batch size of training data for secondary learner" , ) parser.add_argument( "--batch_size" , default=1_6 , type=__a , help="batch size of training data of language model(gpt2) " ) parser.add_argument( "--eval_interval" , default=1_0 , type=__a , help=( "decay the selectivity of our secondary learner filter from" "1 standard deviation above average to 1 below average after 10 batches" ) , ) parser.add_argument( "--number" , default=1_0_0 , type=__a , help="The number of examples split to be used as objective_set/test_data" ) parser.add_argument( "--min_len" , default=1_0_2_6 , type=__a , help="The minimum length of the article to be used as objective set" ) parser.add_argument( "--secondary_learner_max_epochs" , default=1_5 , type=__a , help="number of epochs to train secondary learner" ) parser.add_argument("--trim" , default=__a , type=__a , help="truncate the example if it exceeds context length" ) parser.add_argument( "--threshold" , default=1.0 , type=__a , help=( "The threshold value used by secondary learner to filter the train_data and allow only" " informative data as input to the model" ) , ) parser.add_argument("--finetuned_model_name" , default="gpt2_finetuned.pt" , type=__a , help="finetuned_model_name" ) parser.add_argument( "--recopy_model" , default=__a , type=__a , help="Reset the model to the original pretrained GPT-2 weights after each iteration" , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=3_2 , max_steps=1_0 , size_objective_set=1_0_0 , min_len=1_0_2_6 , trim=__a , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , ) # Load train data for secondary learner SCREAMING_SNAKE_CASE_ : List[str] = joblib.load("data/IGF_values.jbl" ) # Train secondary learner SCREAMING_SNAKE_CASE_ : Optional[Any] = training_secondary_learner( __a , secondary_learner_max_epochs=1_5 , secondary_learner_batch_size=1_2_8 , eval_freq=1_0_0 , igf_model_path="igf_model.pt" , ) # load pretrained gpt2 model SCREAMING_SNAKE_CASE_ : Tuple = GPTaLMHeadModel.from_pretrained("gpt2" ) set_seed(4_2 ) # Generate train and test data to train and evaluate gpt2 model SCREAMING_SNAKE_CASE_ : Optional[Any] = generate_datasets( context_len=3_2 , file="data/tokenized_stories_train_wikitext103.jbl" , number=1_0_0 , min_len=1_0_2_6 , trim=__a ) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( __a , __a , __a , context_len=3_2 , max_steps=1_0_0_0 , batch_size=1_6 , threshold=1.0 , recopy_model=__a , secondary_learner=__a , eval_interval=1_0 , finetuned_model_name="gpt2_finetuned.pt" , ) if __name__ == "__main__": main()
216
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __A = { "configuration_pix2struct": [ "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Pix2StructConfig", "Pix2StructTextConfig", "Pix2StructVisionConfig", ], "processing_pix2struct": ["Pix2StructProcessor"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["Pix2StructImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST", "Pix2StructPreTrainedModel", "Pix2StructForConditionalGeneration", "Pix2StructVisionModel", "Pix2StructTextModel", ] if TYPE_CHECKING: from .configuration_pixastruct import ( PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP, PixaStructConfig, PixaStructTextConfig, PixaStructVisionConfig, ) from .processing_pixastruct import PixaStructProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_pixastruct import PixaStructImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pixastruct import ( PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST, PixaStructForConditionalGeneration, PixaStructPreTrainedModel, PixaStructTextModel, PixaStructVisionModel, ) else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
59
0
"""simple docstring""" A = tuple[float, float, float] A = tuple[float, float, float] def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> Vectorad: """simple docstring""" __UpperCAmelCase : Optional[int] = end_pointa[0] - end_pointa[0] __UpperCAmelCase : Any = end_pointa[1] - end_pointa[1] __UpperCAmelCase : str = end_pointa[2] - end_pointa[2] return (x, y, z) def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> Vectorad: """simple docstring""" __UpperCAmelCase : List[str] = ab[1] * ac[2] - ab[2] * ac[1] # *i __UpperCAmelCase : Tuple = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j __UpperCAmelCase : Tuple = ab[0] * ac[1] - ab[1] * ac[0] # *k return (x, y, z) def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> bool: """simple docstring""" return tuple(round(__a , __a ) for x in vector ) == (0, 0, 0) def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = 10 ) -> bool: """simple docstring""" __UpperCAmelCase : List[Any] = create_vector(__a , __a ) __UpperCAmelCase : Dict = create_vector(__a , __a ) return is_zero_vector(get_ad_vectors_cross(__a , __a ) , __a )
77
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer __A = logging.get_logger(__name__) __A = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} __A = { "vocab_file": { "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt", "distilbert-base-uncased-distilled-squad": ( "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt" ), "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt", "distilbert-base-cased-distilled-squad": ( "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt" ), "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt", "distilbert-base-multilingual-cased": ( "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt" ), }, "tokenizer_file": { "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json", "distilbert-base-uncased-distilled-squad": ( "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json" ), "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json", "distilbert-base-cased-distilled-squad": ( "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json" ), "distilbert-base-german-cased": ( "https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json" ), "distilbert-base-multilingual-cased": ( "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json" ), }, } __A = { "distilbert-base-uncased": 512, "distilbert-base-uncased-distilled-squad": 512, "distilbert-base-cased": 512, "distilbert-base-cased-distilled-squad": 512, "distilbert-base-german-cased": 512, "distilbert-base-multilingual-cased": 512, } __A = { "distilbert-base-uncased": {"do_lower_case": True}, "distilbert-base-uncased-distilled-squad": {"do_lower_case": True}, "distilbert-base-cased": {"do_lower_case": False}, "distilbert-base-cased-distilled-squad": {"do_lower_case": False}, "distilbert-base-german-cased": {"do_lower_case": False}, "distilbert-base-multilingual-cased": {"do_lower_case": False}, } class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = PRETRAINED_INIT_CONFIGURATION lowercase_ = ["input_ids", "attention_mask"] lowercase_ = DistilBertTokenizer def __init__(self : Tuple , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[Any]="[UNK]" , UpperCAmelCase_ : Dict="[SEP]" , UpperCAmelCase_ : Dict="[PAD]" , UpperCAmelCase_ : Optional[int]="[CLS]" , UpperCAmelCase_ : str="[MASK]" , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[str]=None , **UpperCAmelCase_ : List[str] , ) ->str: '''simple docstring''' super().__init__( UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , tokenize_chinese_chars=UpperCAmelCase_ , strip_accents=UpperCAmelCase_ , **UpperCAmelCase_ , ) lowerCamelCase__: Union[str, Any] =json.loads(self.backend_tokenizer.normalizer.__getstate__()) if ( normalizer_state.get("lowercase" , UpperCAmelCase_) != do_lower_case or normalizer_state.get("strip_accents" , UpperCAmelCase_) != strip_accents or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase_) != tokenize_chinese_chars ): lowerCamelCase__: List[str] =getattr(UpperCAmelCase_ , normalizer_state.pop("type")) lowerCamelCase__: Optional[int] =do_lower_case lowerCamelCase__: int =strip_accents lowerCamelCase__: Any =tokenize_chinese_chars lowerCamelCase__: Any =normalizer_class(**UpperCAmelCase_) lowerCamelCase__: str =do_lower_case def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any]=None) ->Dict: '''simple docstring''' lowerCamelCase__: str =[self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None) ->List[int]: '''simple docstring''' lowerCamelCase__: str =[self.sep_token_id] lowerCamelCase__: str =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None) ->Tuple[str]: '''simple docstring''' lowerCamelCase__: str =self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_) return tuple(UpperCAmelCase_)
59
0
"""simple docstring""" from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging lowercase__ = logging.get_logger(__name__) lowercase__ = { """EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""", # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class lowerCAmelCase__ ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowerCamelCase__ = """gptj""" lowerCamelCase__ = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , lowercase=50400 , lowercase=2048 , lowercase=4096 , lowercase=28 , lowercase=16 , lowercase=64 , lowercase=None , lowercase="gelu_new" , lowercase=0.0 , lowercase=0.0 , lowercase=0.0 , lowercase=1E-5 , lowercase=0.02 , lowercase=True , lowercase=50256 , lowercase=50256 , lowercase=False , **lowercase , ): _lowerCamelCase : Union[str, Any] = vocab_size _lowerCamelCase : Union[str, Any] = n_positions _lowerCamelCase : int = n_embd _lowerCamelCase : Union[str, Any] = n_layer _lowerCamelCase : Optional[Any] = n_head _lowerCamelCase : str = n_inner _lowerCamelCase : int = rotary_dim _lowerCamelCase : List[str] = activation_function _lowerCamelCase : int = resid_pdrop _lowerCamelCase : Any = embd_pdrop _lowerCamelCase : Union[str, Any] = attn_pdrop _lowerCamelCase : List[Any] = layer_norm_epsilon _lowerCamelCase : Optional[Any] = initializer_range _lowerCamelCase : Optional[Any] = use_cache _lowerCamelCase : Any = bos_token_id _lowerCamelCase : Tuple = eos_token_id super().__init__( bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , tie_word_embeddings=UpperCAmelCase_ , **UpperCAmelCase_ ) class lowerCAmelCase__ ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self , lowercase , lowercase = "default" , lowercase = None , lowercase = False , ): super().__init__(UpperCAmelCase_ , task=UpperCAmelCase_ , patching_specs=UpperCAmelCase_ , use_past=UpperCAmelCase_ ) if not getattr(self._config , 'pad_token_id' , UpperCAmelCase_ ): # TODO: how to do that better? _lowerCamelCase : Optional[int] = 0 @property def A_ ( self ): _lowerCamelCase : Dict = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: self.fill_with_past_key_values_(UpperCAmelCase_ , direction='inputs' ) _lowerCamelCase : int = {0: "batch", 1: "past_sequence + sequence"} else: _lowerCamelCase : List[Any] = {0: "batch", 1: "sequence"} return common_inputs @property def A_ ( self ): return self._config.n_layer @property def A_ ( self ): return self._config.n_head def A_ ( self , lowercase , lowercase = -1 , lowercase = -1 , lowercase = False , lowercase = None , ): _lowerCamelCase : List[Any] = super(UpperCAmelCase_ , self ).generate_dummy_inputs( UpperCAmelCase_ , batch_size=UpperCAmelCase_ , seq_length=UpperCAmelCase_ , is_pair=UpperCAmelCase_ , framework=UpperCAmelCase_ ) # We need to order the input in the way they appears in the forward() _lowerCamelCase : str = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch _lowerCamelCase : List[str] = common_inputs["input_ids"].shape # Not using the same length for past_key_values _lowerCamelCase : List[Any] = seqlen + 2 _lowerCamelCase : Optional[int] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) _lowerCamelCase : str = [ (torch.zeros(UpperCAmelCase_ ), torch.zeros(UpperCAmelCase_ )) for _ in range(self.num_layers ) ] _lowerCamelCase : Dict = common_inputs["attention_mask"] if self.use_past: _lowerCamelCase : Optional[Any] = ordered_inputs["attention_mask"].dtype _lowerCamelCase : str = torch.cat( [ordered_inputs['attention_mask'], torch.ones(UpperCAmelCase_ , UpperCAmelCase_ , dtype=UpperCAmelCase_ )] , dim=1 ) return ordered_inputs @property def A_ ( self ): return 13
630
import operator as op def lowerCAmelCase_ ( __a ) -> Tuple: """simple docstring""" lowerCamelCase__: Optional[Any] =[] lowerCamelCase__: Tuple =lambda __a , __a : int(x / y ) # noqa: E731 integer division operation lowerCamelCase__: Tuple ={ "^": op.pow, "*": op.mul, "/": div, "+": op.add, "-": op.sub, } # operators & their respective operation # print table header print("Symbol".center(8 ) , "Action".center(12 ) , "Stack" , sep=" | " ) print("-" * (30 + len(__a )) ) for x in post_fix: if x.isdigit(): # if x in digit stack.append(__a ) # append x to stack # output in tabular format print(x.rjust(8 ) , ("push(" + x + ")").ljust(12 ) , ",".join(__a ) , sep=" | " ) else: lowerCamelCase__: List[Any] =stack.pop() # pop stack # output in tabular format print("".rjust(8 ) , ("pop(" + b + ")").ljust(12 ) , ",".join(__a ) , sep=" | " ) lowerCamelCase__: Optional[Any] =stack.pop() # pop stack # output in tabular format print("".rjust(8 ) , ("pop(" + a + ")").ljust(12 ) , ",".join(__a ) , sep=" | " ) stack.append( str(opr[x](int(__a ) , int(__a ) ) ) ) # evaluate the 2 values popped from stack & push result to stack # output in tabular format print( x.rjust(8 ) , ("push(" + a + x + b + ")").ljust(12 ) , ",".join(__a ) , sep=" | " , ) return int(stack[0] ) if __name__ == "__main__": __A = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ") print("\n\tResult = ", solve(Postfix))
59
0
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { """microsoft/focalnet-tiny""": """https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json""", } class a_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): '''simple docstring''' __a: Optional[int] = '''focalnet''' def __init__( self , lowercase_=2_2_4 , lowercase_=4 , lowercase_=3 , lowercase_=9_6 , lowercase_=False , lowercase_=[1_9_2, 3_8_4, 7_6_8, 7_6_8] , lowercase_=[2, 2, 6, 2] , lowercase_=[2, 2, 2, 2] , lowercase_=[3, 3, 3, 3] , lowercase_="gelu" , lowercase_=4.0 , lowercase_=0.0 , lowercase_=0.1 , lowercase_=False , lowercase_=1e-4 , lowercase_=False , lowercase_=False , lowercase_=False , lowercase_=0.02 , lowercase_=1e-5 , lowercase_=3_2 , lowercase_=None , lowercase_=None , **lowercase_ , ) -> Optional[int]: '''simple docstring''' super().__init__(**UpperCAmelCase_ ) lowerCAmelCase_ = image_size lowerCAmelCase_ = patch_size lowerCAmelCase_ = num_channels lowerCAmelCase_ = embed_dim lowerCAmelCase_ = use_conv_embed lowerCAmelCase_ = hidden_sizes lowerCAmelCase_ = depths lowerCAmelCase_ = focal_levels lowerCAmelCase_ = focal_windows lowerCAmelCase_ = hidden_act lowerCAmelCase_ = mlp_ratio lowerCAmelCase_ = hidden_dropout_prob lowerCAmelCase_ = drop_path_rate lowerCAmelCase_ = use_layerscale lowerCAmelCase_ = layerscale_value lowerCAmelCase_ = use_post_layernorm lowerCAmelCase_ = use_post_layernorm_in_modulation lowerCAmelCase_ = normalize_modulator lowerCAmelCase_ = initializer_range lowerCAmelCase_ = layer_norm_eps lowerCAmelCase_ = encoder_stride lowerCAmelCase_ = ["stem"] + [f'''stage{idx}''' for idx in range(1 , len(self.depths ) + 1 )] lowerCAmelCase_ = get_aligned_output_features_output_indices( out_features=UpperCAmelCase_ , out_indices=UpperCAmelCase_ , stage_names=self.stage_names )
318
from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING __A = logging.get_logger(__name__) @add_end_docstrings(__SCREAMING_SNAKE_CASE ) class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__(self : List[Any] , **UpperCAmelCase_ : Any) ->Any: '''simple docstring''' super().__init__(**UpperCAmelCase_) requires_backends(self , "vision") requires_backends(self , "torch") if self.framework != "pt": raise ValueError(F"""The {self.__class__} is only available in PyTorch.""") self.check_model_type(UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Tuple , **UpperCAmelCase_ : List[Any]) ->Tuple: '''simple docstring''' lowerCamelCase__: Optional[int] ={} lowerCamelCase__: Tuple ={} lowerCamelCase__: str ={} # preprocess args if "points_per_batch" in kwargs: lowerCamelCase__: Optional[Any] =kwargs["points_per_batch"] if "points_per_crop" in kwargs: lowerCamelCase__: int =kwargs["points_per_crop"] if "crops_n_layers" in kwargs: lowerCamelCase__: Any =kwargs["crops_n_layers"] if "crop_overlap_ratio" in kwargs: lowerCamelCase__: Tuple =kwargs["crop_overlap_ratio"] if "crop_n_points_downscale_factor" in kwargs: lowerCamelCase__: List[Any] =kwargs["crop_n_points_downscale_factor"] # postprocess args if "pred_iou_thresh" in kwargs: lowerCamelCase__: List[str] =kwargs["pred_iou_thresh"] if "stability_score_offset" in kwargs: lowerCamelCase__: int =kwargs["stability_score_offset"] if "mask_threshold" in kwargs: lowerCamelCase__: Optional[int] =kwargs["mask_threshold"] if "stability_score_thresh" in kwargs: lowerCamelCase__: str =kwargs["stability_score_thresh"] if "crops_nms_thresh" in kwargs: lowerCamelCase__: Any =kwargs["crops_nms_thresh"] if "output_rle_mask" in kwargs: lowerCamelCase__: List[Any] =kwargs["output_rle_mask"] if "output_bboxes_mask" in kwargs: lowerCamelCase__: List[str] =kwargs["output_bboxes_mask"] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__(self : int , UpperCAmelCase_ : Dict , *UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Optional[Any]=None , **UpperCAmelCase_ : Dict) ->Optional[Any]: '''simple docstring''' return super().__call__(UpperCAmelCase_ , *UpperCAmelCase_ , num_workers=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , **UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any]=64 , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : float = 512 / 1_500 , UpperCAmelCase_ : Optional[int] = 32 , UpperCAmelCase_ : Optional[int] = 1 , ) ->Dict: '''simple docstring''' lowerCamelCase__: Dict =load_image(UpperCAmelCase_) lowerCamelCase__: List[str] =self.image_processor.size["longest_edge"] lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Union[str, Any] =self.image_processor.generate_crop_boxes( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: str =self.image_processor(images=UpperCAmelCase_ , return_tensors="pt") with self.device_placement(): if self.framework == "pt": lowerCamelCase__: str =self.get_inference_context() with inference_context(): lowerCamelCase__: Union[str, Any] =self._ensure_tensor_on_device(UpperCAmelCase_ , device=self.device) lowerCamelCase__: Optional[Any] =self.model.get_image_embeddings(model_inputs.pop("pixel_values")) lowerCamelCase__: str =image_embeddings lowerCamelCase__: int =grid_points.shape[1] lowerCamelCase__: int =points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. " "To return all points at once, set points_per_batch to None") for i in range(0 , UpperCAmelCase_ , UpperCAmelCase_): lowerCamelCase__: int =grid_points[:, i : i + points_per_batch, :, :] lowerCamelCase__: Optional[Any] =input_labels[:, i : i + points_per_batch] lowerCamelCase__: Dict =i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict=0.88 , UpperCAmelCase_ : Optional[Any]=0.95 , UpperCAmelCase_ : Tuple=0 , UpperCAmelCase_ : Any=1 , ) ->Optional[Any]: '''simple docstring''' lowerCamelCase__: Any =model_inputs.pop("input_boxes") lowerCamelCase__: Dict =model_inputs.pop("is_last") lowerCamelCase__: int =model_inputs.pop("original_sizes").tolist() lowerCamelCase__: Union[str, Any] =model_inputs.pop("reshaped_input_sizes").tolist() lowerCamelCase__: Union[str, Any] =self.model(**UpperCAmelCase_) # post processing happens here in order to avoid CPU GPU copies of ALL the masks lowerCamelCase__: Optional[int] =model_outputs["pred_masks"] lowerCamelCase__: Union[str, Any] =self.image_processor.post_process_masks( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , binarize=UpperCAmelCase_) lowerCamelCase__: Optional[Any] =model_outputs["iou_scores"] lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Any =self.image_processor.filter_masks( masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Optional[int]=0.7 , ) ->Tuple: '''simple docstring''' lowerCamelCase__: Any =[] lowerCamelCase__: Optional[int] =[] lowerCamelCase__: List[str] =[] for model_output in model_outputs: all_scores.append(model_output.pop("iou_scores")) all_masks.extend(model_output.pop("masks")) all_boxes.append(model_output.pop("boxes")) lowerCamelCase__: str =torch.cat(UpperCAmelCase_) lowerCamelCase__: List[str] =torch.cat(UpperCAmelCase_) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Dict =self.image_processor.post_process_for_mask_generation( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: List[str] =defaultdict(UpperCAmelCase_) for output in model_outputs: for k, v in output.items(): extra[k].append(UpperCAmelCase_) lowerCamelCase__: Any ={} if output_rle_mask: lowerCamelCase__: Union[str, Any] =rle_mask if output_bboxes_mask: lowerCamelCase__: int =bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
59
0
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class lowerCamelCase ( __SCREAMING_SNAKE_CASE ): lowercase : List[str] = 'facebook/bart-large-mnli' lowercase : Optional[Any] = ( 'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which ' 'should be the text to classify, and `labels`, which should be the list of labels to use for classification. ' 'It returns the most likely label in the list of provided `labels` for the input text.' ) lowercase : int = 'text_classifier' lowercase : Tuple = AutoTokenizer lowercase : str = AutoModelForSequenceClassification lowercase : int = ['text', ['text']] lowercase : List[str] = ['text'] def a_ ( self ): super().setup() UpperCamelCase : Optional[int] = self.model.config UpperCamelCase : Any = -1 for idx, label in config.idalabel.items(): if label.lower().startswith("""entail""" ): UpperCamelCase : Union[str, Any] = int(UpperCAmelCase_ ) if self.entailment_id == -1: raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""" ) def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase : Dict = labels return self.pre_processor( [text] * len(UpperCAmelCase_ ) , [f'This example is {label}' for label in labels] , return_tensors="""pt""" , padding="""max_length""" , ) def a_ ( self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase : List[Any] = outputs.logits UpperCamelCase : Optional[Any] = torch.argmax(logits[:, 2] ).item() return self._labels[label_id]
499
from transformers import BertTokenizerFast from .custom_tokenization import CustomTokenizer class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = CustomTokenizer pass
59
0
"""simple docstring""" # Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] ): lowerCAmelCase = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } # BLUE scores as follows: # "pair": [fairseq, transformers] lowerCAmelCase = { "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"], "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"], "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"], "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"], } lowerCAmelCase = F'{src_lang}-{tgt_lang}' lowerCAmelCase = F'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n' os.makedirs(__a , exist_ok=__a ) lowerCAmelCase = os.path.join(__a , 'README.md' ) print(F'Generating {path}' ) with open(__a , 'w' , encoding='utf-8' ) as f: f.write(__a ) # make sure we are under the root of the project __UpperCamelCase : str = Path(__file__).resolve().parent.parent.parent __UpperCamelCase : Any = repo_dir / '''model_cards''' for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase : Tuple = model_name.split('''-''') __UpperCamelCase : str = model_cards_dir / '''facebook''' / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
4
import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE_ (self : Dict) ->Optional[int]: '''simple docstring''' lowerCamelCase__: List[Any] =inspect.getfile(accelerate.test_utils) lowerCamelCase__: List[Any] =os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"]) lowerCamelCase__: Any =os.path.sep.join( mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]) lowerCamelCase__: Tuple =os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"]) @require_multi_gpu def SCREAMING_SNAKE_CASE_ (self : str) ->str: '''simple docstring''' print(F"""Found {torch.cuda.device_count()} devices.""") lowerCamelCase__: Union[str, Any] =["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path] with patch_environment(omp_num_threads=1): execute_subprocess_async(UpperCAmelCase_ , env=os.environ.copy()) @require_multi_gpu def SCREAMING_SNAKE_CASE_ (self : List[str]) ->List[Any]: '''simple docstring''' print(F"""Found {torch.cuda.device_count()} devices.""") lowerCamelCase__: Dict =["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path] print(F"""Command: {cmd}""") with patch_environment(omp_num_threads=1): execute_subprocess_async(UpperCAmelCase_ , env=os.environ.copy()) @require_multi_gpu def SCREAMING_SNAKE_CASE_ (self : Dict) ->Tuple: '''simple docstring''' lowerCamelCase__: int =["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__)] with patch_environment(omp_num_threads=1): execute_subprocess_async(UpperCAmelCase_ , env=os.environ.copy()) @require_multi_gpu def SCREAMING_SNAKE_CASE_ (self : str) ->List[Any]: '''simple docstring''' print(F"""Found {torch.cuda.device_count()} devices, using 2 devices only""") lowerCamelCase__: int =["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1"): execute_subprocess_async(UpperCAmelCase_ , env=os.environ.copy()) if __name__ == "__main__": __A = Accelerator() __A = (accelerator.state.process_index + 2, 10) __A = torch.randint(0, 10, shape).to(accelerator.device) __A = "" __A = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." __A = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." __A = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
59
0
import argparse import os.path as osp import re import torch from safetensors.torch import load_file, save_file # =================# # UNet Conversion # # =================# __A =[ # (stable-diffusion, HF Diffusers) ('''time_embed.0.weight''', '''time_embedding.linear_1.weight'''), ('''time_embed.0.bias''', '''time_embedding.linear_1.bias'''), ('''time_embed.2.weight''', '''time_embedding.linear_2.weight'''), ('''time_embed.2.bias''', '''time_embedding.linear_2.bias'''), ('''input_blocks.0.0.weight''', '''conv_in.weight'''), ('''input_blocks.0.0.bias''', '''conv_in.bias'''), ('''out.0.weight''', '''conv_norm_out.weight'''), ('''out.0.bias''', '''conv_norm_out.bias'''), ('''out.2.weight''', '''conv_out.weight'''), ('''out.2.bias''', '''conv_out.bias'''), ] __A =[ # (stable-diffusion, HF Diffusers) ('''in_layers.0''', '''norm1'''), ('''in_layers.2''', '''conv1'''), ('''out_layers.0''', '''norm2'''), ('''out_layers.3''', '''conv2'''), ('''emb_layers.1''', '''time_emb_proj'''), ('''skip_connection''', '''conv_shortcut'''), ] __A =[] # hardcoded number of downblocks and resnets/attentions... # would need smarter logic for other networks. for i in range(4): # loop over downblocks/upblocks for j in range(2): # loop over resnets/attentions for downblocks __A =F"""down_blocks.{i}.resnets.{j}.""" __A =F"""input_blocks.{3*i + j + 1}.0.""" unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix)) if i < 3: # no attention layers in down_blocks.3 __A =F"""down_blocks.{i}.attentions.{j}.""" __A =F"""input_blocks.{3*i + j + 1}.1.""" unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix)) for j in range(3): # loop over resnets/attentions for upblocks __A =F"""up_blocks.{i}.resnets.{j}.""" __A =F"""output_blocks.{3*i + j}.0.""" unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix)) if i > 0: # no attention layers in up_blocks.0 __A =F"""up_blocks.{i}.attentions.{j}.""" __A =F"""output_blocks.{3*i + j}.1.""" unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix)) if i < 3: # no downsample in down_blocks.3 __A =F"""down_blocks.{i}.downsamplers.0.conv.""" __A =F"""input_blocks.{3*(i+1)}.0.op.""" unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix)) # no upsample in up_blocks.3 __A =F"""up_blocks.{i}.upsamplers.0.""" __A =F"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}.""" unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix)) __A ='''mid_block.attentions.0.''' __A ='''middle_block.1.''' unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix)) for j in range(2): __A =F"""mid_block.resnets.{j}.""" __A =F"""middle_block.{2*j}.""" unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix)) def lowerCamelCase_ ( lowerCamelCase__ ): lowerCamelCase_ = {k: k for k in unet_state_dict.keys()} for sd_name, hf_name in unet_conversion_map: lowerCamelCase_ = sd_name for k, v in mapping.items(): if "resnets" in k: for sd_part, hf_part in unet_conversion_map_resnet: lowerCamelCase_ = v.replace(__a , __a ) lowerCamelCase_ = v for k, v in mapping.items(): for sd_part, hf_part in unet_conversion_map_layer: lowerCamelCase_ = v.replace(__a , __a ) lowerCamelCase_ = v lowerCamelCase_ = {v: unet_state_dict[k] for k, v in mapping.items()} return new_state_dict # ================# # VAE Conversion # # ================# __A =[ # (stable-diffusion, HF Diffusers) ('''nin_shortcut''', '''conv_shortcut'''), ('''norm_out''', '''conv_norm_out'''), ('''mid.attn_1.''', '''mid_block.attentions.0.'''), ] for i in range(4): # down_blocks have two resnets for j in range(2): __A =F"""encoder.down_blocks.{i}.resnets.{j}.""" __A =F"""encoder.down.{i}.block.{j}.""" vae_conversion_map.append((sd_down_prefix, hf_down_prefix)) if i < 3: __A =F"""down_blocks.{i}.downsamplers.0.""" __A =F"""down.{i}.downsample.""" vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix)) __A =F"""up_blocks.{i}.upsamplers.0.""" __A =F"""up.{3-i}.upsample.""" vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix)) # up_blocks have three resnets # also, up blocks in hf are numbered in reverse from sd for j in range(3): __A =F"""decoder.up_blocks.{i}.resnets.{j}.""" __A =F"""decoder.up.{3-i}.block.{j}.""" vae_conversion_map.append((sd_up_prefix, hf_up_prefix)) # this part accounts for mid blocks in both the encoder and the decoder for i in range(2): __A =F"""mid_block.resnets.{i}.""" __A =F"""mid.block_{i+1}.""" vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix)) __A =[ # (stable-diffusion, HF Diffusers) ('''norm.''', '''group_norm.'''), ('''q.''', '''query.'''), ('''k.''', '''key.'''), ('''v.''', '''value.'''), ('''proj_out.''', '''proj_attn.'''), ] def lowerCamelCase_ ( lowerCamelCase__ ): return w.reshape(*w.shape , 1 , 1 ) def lowerCamelCase_ ( lowerCamelCase__ ): lowerCamelCase_ = {k: k for k in vae_state_dict.keys()} for k, v in mapping.items(): for sd_part, hf_part in vae_conversion_map: lowerCamelCase_ = v.replace(__a , __a ) lowerCamelCase_ = v for k, v in mapping.items(): if "attentions" in k: for sd_part, hf_part in vae_conversion_map_attn: lowerCamelCase_ = v.replace(__a , __a ) lowerCamelCase_ = v lowerCamelCase_ = {v: vae_state_dict[k] for k, v in mapping.items()} lowerCamelCase_ = ["q", "k", "v", "proj_out"] for k, v in new_state_dict.items(): for weight_name in weights_to_convert: if F'mid.attn_1.{weight_name}.weight' in k: print(F'Reshaping {k} for SD format' ) lowerCamelCase_ = reshape_weight_for_sd(__a ) return new_state_dict # =========================# # Text Encoder Conversion # # =========================# __A =[ # (stable-diffusion, HF Diffusers) ('''resblocks.''', '''text_model.encoder.layers.'''), ('''ln_1''', '''layer_norm1'''), ('''ln_2''', '''layer_norm2'''), ('''.c_fc.''', '''.fc1.'''), ('''.c_proj.''', '''.fc2.'''), ('''.attn''', '''.self_attn'''), ('''ln_final.''', '''transformer.text_model.final_layer_norm.'''), ('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''), ('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''), ] __A ={re.escape(x[1]): x[0] for x in textenc_conversion_lst} __A =re.compile('''|'''.join(protected.keys())) # Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp __A ={'''q''': 0, '''k''': 1, '''v''': 2} def lowerCamelCase_ ( lowerCamelCase__ ): lowerCamelCase_ = {} lowerCamelCase_ = {} lowerCamelCase_ = {} for k, v in text_enc_dict.items(): if ( k.endswith(".self_attn.q_proj.weight" ) or k.endswith(".self_attn.k_proj.weight" ) or k.endswith(".self_attn.v_proj.weight" ) ): lowerCamelCase_ = k[: -len(".q_proj.weight" )] lowerCamelCase_ = k[-len("q_proj.weight" )] if k_pre not in capture_qkv_weight: lowerCamelCase_ = [None, None, None] lowerCamelCase_ = v continue if ( k.endswith(".self_attn.q_proj.bias" ) or k.endswith(".self_attn.k_proj.bias" ) or k.endswith(".self_attn.v_proj.bias" ) ): lowerCamelCase_ = k[: -len(".q_proj.bias" )] lowerCamelCase_ = k[-len("q_proj.bias" )] if k_pre not in capture_qkv_bias: lowerCamelCase_ = [None, None, None] lowerCamelCase_ = v continue lowerCamelCase_ = textenc_pattern.sub(lambda lowerCamelCase__ : protected[re.escape(m.group(0 ) )] , __a ) lowerCamelCase_ = v for k_pre, tensors in capture_qkv_weight.items(): if None in tensors: raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" ) lowerCamelCase_ = textenc_pattern.sub(lambda lowerCamelCase__ : protected[re.escape(m.group(0 ) )] , __a ) lowerCamelCase_ = torch.cat(__a ) for k_pre, tensors in capture_qkv_bias.items(): if None in tensors: raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" ) lowerCamelCase_ = textenc_pattern.sub(lambda lowerCamelCase__ : protected[re.escape(m.group(0 ) )] , __a ) lowerCamelCase_ = torch.cat(__a ) return new_state_dict def lowerCamelCase_ ( lowerCamelCase__ ): return text_enc_dict if __name__ == "__main__": __A =argparse.ArgumentParser() parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''') parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''') parser.add_argument( '''--use_safetensors''', action='''store_true''', help='''Save weights use safetensors, default is ckpt.''' ) __A =parser.parse_args() assert args.model_path is not None, "Must provide a model path!" assert args.checkpoint_path is not None, "Must provide a checkpoint path!" # Path for safetensors __A =osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.safetensors''') __A =osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.safetensors''') __A =osp.join(args.model_path, '''text_encoder''', '''model.safetensors''') # Load models from safetensors if it exists, if it doesn't pytorch if osp.exists(unet_path): __A =load_file(unet_path, device='''cpu''') else: __A =osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.bin''') __A =torch.load(unet_path, map_location='''cpu''') if osp.exists(vae_path): __A =load_file(vae_path, device='''cpu''') else: __A =osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.bin''') __A =torch.load(vae_path, map_location='''cpu''') if osp.exists(text_enc_path): __A =load_file(text_enc_path, device='''cpu''') else: __A =osp.join(args.model_path, '''text_encoder''', '''pytorch_model.bin''') __A =torch.load(text_enc_path, map_location='''cpu''') # Convert the UNet model __A =convert_unet_state_dict(unet_state_dict) __A ={'''model.diffusion_model.''' + k: v for k, v in unet_state_dict.items()} # Convert the VAE model __A =convert_vae_state_dict(vae_state_dict) __A ={'''first_stage_model.''' + k: v for k, v in vae_state_dict.items()} # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper __A ='''text_model.encoder.layers.22.layer_norm2.bias''' in text_enc_dict if is_vaa_model: # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm __A ={'''transformer.''' + k: v for k, v in text_enc_dict.items()} __A =convert_text_enc_state_dict_vaa(text_enc_dict) __A ={'''cond_stage_model.model.''' + k: v for k, v in text_enc_dict.items()} else: __A =convert_text_enc_state_dict(text_enc_dict) __A ={'''cond_stage_model.transformer.''' + k: v for k, v in text_enc_dict.items()} # Put together new checkpoint __A ={**unet_state_dict, **vae_state_dict, **text_enc_dict} if args.half: __A ={k: v.half() for k, v in state_dict.items()} if args.use_safetensors: save_file(state_dict, args.checkpoint_path) else: __A ={'''state_dict''': state_dict} torch.save(state_dict, args.checkpoint_path)
463
from typing import List, Optional, Tuple, Union import PIL import torch from torchvision import transforms from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput from diffusers.schedulers import DDIMScheduler from diffusers.utils import randn_tensor __A = transforms.Compose( [ transforms.Resize((256, 256)), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def lowerCAmelCase_ ( __a ) -> str: """simple docstring""" if isinstance(__a , torch.Tensor ): return image elif isinstance(__a , PIL.Image.Image ): lowerCamelCase__: Any =[image] lowerCamelCase__: Optional[Any] =[trans(img.convert("RGB" ) ) for img in image] lowerCamelCase__: Dict =torch.stack(__a ) return image class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__(self : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple) ->int: '''simple docstring''' super().__init__() # make sure scheduler can always be converted to DDIM lowerCamelCase__: Tuple =DDIMScheduler.from_config(scheduler.config) self.register_modules(unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Union[str, Any]) ->Dict: '''simple docstring''' if strength < 0 or strength > 1: raise ValueError(F"""The value of strength should in [0.0, 1.0] but is {strength}""") def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple) ->Tuple: '''simple docstring''' lowerCamelCase__: int =min(int(num_inference_steps * strength) , UpperCAmelCase_) lowerCamelCase__: str =max(num_inference_steps - init_timestep , 0) lowerCamelCase__: int =self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int]=None) ->Optional[int]: '''simple docstring''' if not isinstance(UpperCAmelCase_ , (torch.Tensor, PIL.Image.Image, list)): raise ValueError( F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(UpperCAmelCase_)}""") lowerCamelCase__: Optional[int] =image.to(device=UpperCAmelCase_ , dtype=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) and len(UpperCAmelCase_) != batch_size: raise ValueError( F"""You have passed a list of generators of length {len(UpperCAmelCase_)}, but requested an effective batch""" F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""") lowerCamelCase__: Dict =init_latents.shape lowerCamelCase__: int =randn_tensor(UpperCAmelCase_ , generator=UpperCAmelCase_ , device=UpperCAmelCase_ , dtype=UpperCAmelCase_) # get latents print("add noise to latents at timestep" , UpperCAmelCase_) lowerCamelCase__: Union[str, Any] =self.scheduler.add_noise(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: int =init_latents return latents @torch.no_grad() def __call__(self : Tuple , UpperCAmelCase_ : Union[torch.FloatTensor, PIL.Image.Image] = None , UpperCAmelCase_ : float = 0.8 , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : int = 50 , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , ) ->Union[ImagePipelineOutput, Tuple]: '''simple docstring''' self.check_inputs(UpperCAmelCase_) # 2. Preprocess image lowerCamelCase__: Dict =preprocess(UpperCAmelCase_) # 3. set timesteps self.scheduler.set_timesteps(UpperCAmelCase_ , device=self.device) lowerCamelCase__ , lowerCamelCase__: str =self.get_timesteps(UpperCAmelCase_ , UpperCAmelCase_ , self.device) lowerCamelCase__: Optional[int] =timesteps[:1].repeat(UpperCAmelCase_) # 4. Prepare latent variables lowerCamelCase__: int =self.prepare_latents(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , self.unet.dtype , self.device , UpperCAmelCase_) lowerCamelCase__: Tuple =latents # 5. Denoising loop for t in self.progress_bar(UpperCAmelCase_): # 1. predict noise model_output lowerCamelCase__: Dict =self.unet(UpperCAmelCase_ , UpperCAmelCase_).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 lowerCamelCase__: Optional[int] =self.scheduler.step( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , eta=UpperCAmelCase_ , use_clipped_model_output=UpperCAmelCase_ , generator=UpperCAmelCase_ , ).prev_sample lowerCamelCase__: str =(image / 2 + 0.5).clamp(0 , 1) lowerCamelCase__: Optional[Any] =image.cpu().permute(0 , 2 , 3 , 1).numpy() if output_type == "pil": lowerCamelCase__: Dict =self.numpy_to_pil(UpperCAmelCase_) if not return_dict: return (image, latent_timestep.item()) return ImagePipelineOutput(images=UpperCAmelCase_)
59
0
"""simple docstring""" from collections import namedtuple import requests from lxml import html # type: ignore lowerCAmelCase: Any =namedtuple("covid_data", "cases deaths recovered") def __snake_case ( __A = "https://www.worldometers.info/coronavirus/" ) -> covid_data: lowercase : int = "//div[@class = \"maincounter-number\"]/span/text()" return covid_data(*html.fromstring(requests.get(__a ).content ).xpath(__a ) ) lowerCAmelCase: Optional[Any] ="Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}" print(fmt.format(*covid_stats()))
607
import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 __A = data_utils.TransfoXLTokenizer __A = data_utils.TransfoXLCorpus __A = data_utils __A = data_utils def lowerCAmelCase_ ( __a , __a , __a , __a ) -> List[str]: """simple docstring""" if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(__a , "rb" ) as fp: lowerCamelCase__: Optional[Any] =pickle.load(__a , encoding="latin1" ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) lowerCamelCase__: Union[str, Any] =pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"] print(F"""Save vocabulary to {pytorch_vocab_dump_path}""" ) lowerCamelCase__: Any =corpus.vocab.__dict__ torch.save(__a , __a ) lowerCamelCase__: Dict =corpus.__dict__ corpus_dict_no_vocab.pop("vocab" , __a ) lowerCamelCase__: List[str] =pytorch_dump_folder_path + "/" + CORPUS_NAME print(F"""Save dataset to {pytorch_dataset_dump_path}""" ) torch.save(__a , __a ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model lowerCamelCase__: Optional[Any] =os.path.abspath(__a ) lowerCamelCase__: Dict =os.path.abspath(__a ) print(F"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" ) # Initialise PyTorch model if transfo_xl_config_file == "": lowerCamelCase__: int =TransfoXLConfig() else: lowerCamelCase__: Any =TransfoXLConfig.from_json_file(__a ) print(F"""Building PyTorch model from configuration: {config}""" ) lowerCamelCase__: List[Any] =TransfoXLLMHeadModel(__a ) lowerCamelCase__: List[str] =load_tf_weights_in_transfo_xl(__a , __a , __a ) # Save pytorch-model lowerCamelCase__: List[str] =os.path.join(__a , __a ) lowerCamelCase__: Tuple =os.path.join(__a , __a ) print(F"""Save PyTorch model to {os.path.abspath(__a )}""" ) torch.save(model.state_dict() , __a ) print(F"""Save configuration file to {os.path.abspath(__a )}""" ) with open(__a , "w" , encoding="utf-8" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the folder to store the PyTorch model or dataset/vocab.", ) parser.add_argument( "--tf_checkpoint_path", default="", type=str, help="An optional path to a TensorFlow checkpoint path to be converted.", ) parser.add_argument( "--transfo_xl_config_file", default="", type=str, help=( "An optional config json file corresponding to the pre-trained BERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--transfo_xl_dataset_file", default="", type=str, help="An optional dataset file to be converted in a vocabulary.", ) __A = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
59
0
import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging A_ = logging.get_logger(__name__) class snake_case ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' UpperCAmelCase : Tuple = ["""input_features""", """is_longer"""] def __init__( self : int , lowerCAmelCase_ : List[str]=64 , lowerCAmelCase_ : Any=48_000 , lowerCAmelCase_ : List[str]=480 , lowerCAmelCase_ : List[str]=10 , lowerCAmelCase_ : Tuple=1_024 , lowerCAmelCase_ : int=0.0 , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : float = 0 , lowerCAmelCase_ : float = 14_000 , lowerCAmelCase_ : int = None , lowerCAmelCase_ : str = "fusion" , lowerCAmelCase_ : str = "repeatpad" , **lowerCAmelCase_ : Union[str, Any] , ) -> Union[str, Any]: """simple docstring""" super().__init__( feature_size=UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , padding_value=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , **UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE_ = top_db SCREAMING_SNAKE_CASE_ = truncation SCREAMING_SNAKE_CASE_ = padding SCREAMING_SNAKE_CASE_ = fft_window_size SCREAMING_SNAKE_CASE_ = (fft_window_size >> 1) + 1 SCREAMING_SNAKE_CASE_ = hop_length SCREAMING_SNAKE_CASE_ = max_length_s SCREAMING_SNAKE_CASE_ = max_length_s * sampling_rate SCREAMING_SNAKE_CASE_ = sampling_rate SCREAMING_SNAKE_CASE_ = frequency_min SCREAMING_SNAKE_CASE_ = frequency_max SCREAMING_SNAKE_CASE_ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCAmelCase_ , min_frequency=UpperCAmelCase_ , max_frequency=UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , norm=UpperCAmelCase_ , mel_scale='''htk''' , ) SCREAMING_SNAKE_CASE_ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCAmelCase_ , min_frequency=UpperCAmelCase_ , max_frequency=UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , norm='''slaney''' , mel_scale='''slaney''' , ) def _lowercase ( self : int ) -> Dict[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ = copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE_ = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def _lowercase ( self : Any , lowerCAmelCase_ : np.array , lowerCAmelCase_ : Optional[np.array] = None ) -> np.ndarray: """simple docstring""" SCREAMING_SNAKE_CASE_ = spectrogram( UpperCAmelCase_ , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCAmelCase_ , log_mel='''dB''' , ) return log_mel_spectrogram.T def _lowercase ( self : Any , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk SCREAMING_SNAKE_CASE_ = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk SCREAMING_SNAKE_CASE_ = [0] # randomly choose index for each part SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[0] ) SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[1] ) SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[2] ) SCREAMING_SNAKE_CASE_ = mel[idx_front : idx_front + chunk_frames, :] SCREAMING_SNAKE_CASE_ = mel[idx_middle : idx_middle + chunk_frames, :] SCREAMING_SNAKE_CASE_ = mel[idx_back : idx_back + chunk_frames, :] SCREAMING_SNAKE_CASE_ = torch.tensor(mel[None, None, :] ) SCREAMING_SNAKE_CASE_ = torch.nn.functional.interpolate( UpperCAmelCase_ , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE_ = mel_shrink[0][0].numpy() SCREAMING_SNAKE_CASE_ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 ) return mel_fusion def _lowercase ( self : List[str] , lowerCAmelCase_ : np.array , lowerCAmelCase_ : str , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple ) -> np.array: """simple docstring""" if waveform.shape[0] > max_length: if truncation == "rand_trunc": SCREAMING_SNAKE_CASE_ = True # random crop to max_length (for compatibility) -> this should be handled by self.pad SCREAMING_SNAKE_CASE_ = len(UpperCAmelCase_ ) - max_length SCREAMING_SNAKE_CASE_ = np.random.randint(0 , overflow + 1 ) SCREAMING_SNAKE_CASE_ = waveform[idx : idx + max_length] SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(UpperCAmelCase_ , self.mel_filters_slaney )[None, :] elif truncation == "fusion": SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(UpperCAmelCase_ , self.mel_filters ) SCREAMING_SNAKE_CASE_ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed SCREAMING_SNAKE_CASE_ = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. SCREAMING_SNAKE_CASE_ = np.stack([mel, mel, mel, mel] , axis=0 ) SCREAMING_SNAKE_CASE_ = False else: SCREAMING_SNAKE_CASE_ = self._random_mel_fusion(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE_ = True else: raise NotImplementedError(F'''data_truncating {truncation} not implemented''' ) else: SCREAMING_SNAKE_CASE_ = False # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": SCREAMING_SNAKE_CASE_ = int(max_length / len(UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE_ = np.stack(np.tile(UpperCAmelCase_ , n_repeat + 1 ) )[:max_length] if padding == "repeatpad": SCREAMING_SNAKE_CASE_ = int(max_length / len(UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE_ = np.stack(np.tile(UpperCAmelCase_ , UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE_ = np.pad(UpperCAmelCase_ , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 ) if truncation == "fusion": SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(UpperCAmelCase_ , self.mel_filters ) SCREAMING_SNAKE_CASE_ = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 ) else: SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(UpperCAmelCase_ , self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self : str , lowerCAmelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCAmelCase_ : str = None , lowerCAmelCase_ : Optional[str] = None , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , **lowerCAmelCase_ : List[Any] , ) -> BatchFeature: """simple docstring""" SCREAMING_SNAKE_CASE_ = truncation if truncation is not None else self.truncation SCREAMING_SNAKE_CASE_ = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input''' F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) SCREAMING_SNAKE_CASE_ = isinstance(UpperCAmelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) SCREAMING_SNAKE_CASE_ = is_batched_numpy or ( isinstance(UpperCAmelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: SCREAMING_SNAKE_CASE_ = [np.asarray(UpperCAmelCase_ , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(UpperCAmelCase_ , np.ndarray ): SCREAMING_SNAKE_CASE_ = np.asarray(UpperCAmelCase_ , dtype=np.floataa ) elif isinstance(UpperCAmelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): SCREAMING_SNAKE_CASE_ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: SCREAMING_SNAKE_CASE_ = [np.asarray(UpperCAmelCase_ )] # convert to mel spectrogram, truncate and pad if needed. SCREAMING_SNAKE_CASE_ = [ self._get_input_mel(UpperCAmelCase_ , max_length if max_length else self.nb_max_samples , UpperCAmelCase_ , UpperCAmelCase_ ) for waveform in raw_speech ] SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = [] for mel, longer in padded_inputs: input_mel.append(UpperCAmelCase_ ) is_longer.append(UpperCAmelCase_ ) if truncation == "fusion" and sum(UpperCAmelCase_ ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer SCREAMING_SNAKE_CASE_ = np.random.randint(0 , len(UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE_ = True if isinstance(input_mel[0] , UpperCAmelCase_ ): SCREAMING_SNAKE_CASE_ = [np.asarray(UpperCAmelCase_ , dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool SCREAMING_SNAKE_CASE_ = [[longer] for longer in is_longer] SCREAMING_SNAKE_CASE_ = {"input_features": input_mel, "is_longer": is_longer} SCREAMING_SNAKE_CASE_ = BatchFeature(UpperCAmelCase_ ) if return_tensors is not None: SCREAMING_SNAKE_CASE_ = input_features.convert_to_tensors(UpperCAmelCase_ ) return input_features
393
from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax __A = logging.get_logger(__name__) @add_end_docstrings(__SCREAMING_SNAKE_CASE ) class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__(self : Optional[int] , **UpperCAmelCase_ : List[Any]) ->List[str]: '''simple docstring''' super().__init__(**UpperCAmelCase_) requires_backends(self , "vision") self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == "tf" else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING) def __call__(self : List[str] , UpperCAmelCase_ : Union[str, List[str], "Image", List["Image"]] , **UpperCAmelCase_ : List[Any]) ->Tuple: '''simple docstring''' return super().__call__(UpperCAmelCase_ , **UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[Any] , **UpperCAmelCase_ : Optional[int]) ->Any: '''simple docstring''' lowerCamelCase__: Optional[int] ={} if "candidate_labels" in kwargs: lowerCamelCase__: Tuple =kwargs["candidate_labels"] if "hypothesis_template" in kwargs: lowerCamelCase__: Tuple =kwargs["hypothesis_template"] return preprocess_params, {}, {} def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Optional[Any]="This is a photo of {}.") ->str: '''simple docstring''' lowerCamelCase__: int =load_image(UpperCAmelCase_) lowerCamelCase__: Any =self.image_processor(images=[image] , return_tensors=self.framework) lowerCamelCase__: Any =candidate_labels lowerCamelCase__: List[str] =[hypothesis_template.format(UpperCAmelCase_) for x in candidate_labels] lowerCamelCase__: int =self.tokenizer(UpperCAmelCase_ , return_tensors=self.framework , padding=UpperCAmelCase_) lowerCamelCase__: str =[text_inputs] return inputs def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Any) ->Tuple: '''simple docstring''' lowerCamelCase__: int =model_inputs.pop("candidate_labels") lowerCamelCase__: List[str] =model_inputs.pop("text_inputs") if isinstance(text_inputs[0] , UpperCAmelCase_): lowerCamelCase__: List[Any] =text_inputs[0] else: # Batching case. lowerCamelCase__: List[Any] =text_inputs[0][0] lowerCamelCase__: List[str] =self.model(**UpperCAmelCase_ , **UpperCAmelCase_) lowerCamelCase__: str ={ "candidate_labels": candidate_labels, "logits": outputs.logits_per_image, } return model_outputs def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : Union[str, Any]) ->int: '''simple docstring''' lowerCamelCase__: List[Any] =model_outputs.pop("candidate_labels") lowerCamelCase__: Optional[int] =model_outputs["logits"][0] if self.framework == "pt": lowerCamelCase__: Optional[Any] =logits.softmax(dim=-1).squeeze(-1) lowerCamelCase__: Optional[Any] =probs.tolist() if not isinstance(UpperCAmelCase_ , UpperCAmelCase_): lowerCamelCase__: Optional[int] =[scores] elif self.framework == "tf": lowerCamelCase__: List[str] =stable_softmax(UpperCAmelCase_ , axis=-1) lowerCamelCase__: Optional[int] =probs.numpy().tolist() else: raise ValueError(F"""Unsupported framework: {self.framework}""") lowerCamelCase__: Optional[int] =[ {"score": score, "label": candidate_label} for score, candidate_label in sorted(zip(UpperCAmelCase_ , UpperCAmelCase_) , key=lambda UpperCAmelCase_: -x[0]) ] return result
59
0
'''simple docstring''' import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase_ = ['''image_processor''', '''tokenizer'''] UpperCamelCase_ = '''CLIPImageProcessor''' UpperCamelCase_ = ('''XLMRobertaTokenizer''', '''XLMRobertaTokenizerFast''') def __init__( self : List[Any] , UpperCAmelCase : int=None , UpperCAmelCase : List[Any]=None , **UpperCAmelCase : List[str] ) -> Optional[int]: '''simple docstring''' lowercase : Union[str, Any] =None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , UpperCAmelCase_ , ) lowercase : int =kwargs.pop('''feature_extractor''' ) lowercase : int =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(UpperCAmelCase_ , UpperCAmelCase_ ) def __call__( self : List[Any] , UpperCAmelCase : str=None , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : int=None , **UpperCAmelCase : Any ) -> Union[str, Any]: '''simple docstring''' if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: lowercase : List[Any] =self.tokenizer(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ ) if images is not None: lowercase : int =self.image_processor(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ ) if text is not None and images is not None: lowercase : str =image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCAmelCase_ ) , tensor_type=UpperCAmelCase_ ) def A__ ( self : List[str] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Optional[Any] ) -> Dict: '''simple docstring''' return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ ) def A__ ( self : Optional[int] , *UpperCAmelCase : int , **UpperCAmelCase : Any ) -> Optional[Any]: '''simple docstring''' return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ ) @property def A__ ( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' lowercase : Optional[Any] =self.tokenizer.model_input_names lowercase : str =self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
94
import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = 42 lowercase_ = jnp.floataa lowercase_ = True def SCREAMING_SNAKE_CASE_ (self : Any) ->List[str]: '''simple docstring''' super().setup() lowerCamelCase__: int =nn.Dense(5 , dtype=self.dtype) def __call__(self : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Any) ->Optional[int]: '''simple docstring''' lowerCamelCase__: Optional[Any] =super().__call__(*UpperCAmelCase_ , **UpperCAmelCase_) lowerCamelCase__: int =self.cls(outputs[2]) return outputs[:2] + (cls_out,) class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = FlaxBigBirdForNaturalQuestionsModule def lowerCAmelCase_ ( __a , __a , __a , __a , __a , __a ) -> Tuple: """simple docstring""" def cross_entropy(__a , __a , __a=None ): lowerCamelCase__: Tuple =logits.shape[-1] lowerCamelCase__: Tuple =(labels[..., None] == jnp.arange(__a )[None]).astype("f4" ) lowerCamelCase__: str =jax.nn.log_softmax(__a , axis=-1 ) lowerCamelCase__: Optional[Any] =-jnp.sum(labels * logits , axis=-1 ) if reduction is not None: lowerCamelCase__: Optional[Any] =reduction(__a ) return loss lowerCamelCase__: str =partial(__a , reduction=jnp.mean ) lowerCamelCase__: str =cross_entropy(__a , __a ) lowerCamelCase__: Optional[int] =cross_entropy(__a , __a ) lowerCamelCase__: Optional[Any] =cross_entropy(__a , __a ) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class _SCREAMING_SNAKE_CASE : '''simple docstring''' lowercase_ = "google/bigbird-roberta-base" lowercase_ = 3000 lowercase_ = 1_0500 lowercase_ = 128 lowercase_ = 3 lowercase_ = 1 lowercase_ = 5 # tx_args lowercase_ = 3E-5 lowercase_ = 0.0 lowercase_ = 2_0000 lowercase_ = 0.0095 lowercase_ = "bigbird-roberta-natural-questions" lowercase_ = "training-expt" lowercase_ = "data/nq-training.jsonl" lowercase_ = "data/nq-validation.jsonl" def SCREAMING_SNAKE_CASE_ (self : Tuple) ->List[str]: '''simple docstring''' os.makedirs(self.base_dir , exist_ok=UpperCAmelCase_) lowerCamelCase__: Optional[Any] =os.path.join(self.base_dir , self.save_dir) lowerCamelCase__: List[str] =self.batch_size_per_device * jax.device_count() @dataclass class _SCREAMING_SNAKE_CASE : '''simple docstring''' lowercase_ = 42 lowercase_ = 4096 # no dynamic padding on TPUs def __call__(self : List[Any] , UpperCAmelCase_ : Optional[Any]) ->Optional[int]: '''simple docstring''' lowerCamelCase__: Optional[Any] =self.collate_fn(UpperCAmelCase_) lowerCamelCase__: List[Any] =jax.tree_util.tree_map(UpperCAmelCase_ , UpperCAmelCase_) return batch def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : List[str]) ->List[Any]: '''simple docstring''' lowerCamelCase__ , lowerCamelCase__: List[Any] =self.fetch_inputs(features["input_ids"]) lowerCamelCase__: Union[str, Any] ={ "input_ids": jnp.array(UpperCAmelCase_ , dtype=jnp.intaa), "attention_mask": jnp.array(UpperCAmelCase_ , dtype=jnp.intaa), "start_labels": jnp.array(features["start_token"] , dtype=jnp.intaa), "end_labels": jnp.array(features["end_token"] , dtype=jnp.intaa), "pooled_labels": jnp.array(features["category"] , dtype=jnp.intaa), } return batch def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : list) ->Optional[Any]: '''simple docstring''' lowerCamelCase__: Tuple =[self._fetch_inputs(UpperCAmelCase_) for ids in input_ids] return zip(*UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : list) ->Any: '''simple docstring''' lowerCamelCase__: Optional[Any] =[1 for _ in range(len(UpperCAmelCase_))] while len(UpperCAmelCase_) < self.max_length: input_ids.append(self.pad_id) attention_mask.append(0) return input_ids, attention_mask def lowerCAmelCase_ ( __a , __a , __a=None ) -> str: """simple docstring""" if seed is not None: lowerCamelCase__: Any =dataset.shuffle(seed=__a ) for i in range(len(__a ) // batch_size ): lowerCamelCase__: Any =dataset[i * batch_size : (i + 1) * batch_size] yield dict(__a ) @partial(jax.pmap , axis_name="batch" ) def lowerCAmelCase_ ( __a , __a , **__a ) -> List[str]: """simple docstring""" def loss_fn(__a ): lowerCamelCase__: Optional[int] =model_inputs.pop("start_labels" ) lowerCamelCase__: int =model_inputs.pop("end_labels" ) lowerCamelCase__: List[str] =model_inputs.pop("pooled_labels" ) lowerCamelCase__: Optional[int] =state.apply_fn(**__a , params=__a , dropout_rng=__a , train=__a ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: List[Any] =outputs return state.loss_fn( __a , __a , __a , __a , __a , __a , ) lowerCamelCase__ , lowerCamelCase__: int =jax.random.split(__a ) lowerCamelCase__: Optional[Any] =jax.value_and_grad(__a ) lowerCamelCase__ , lowerCamelCase__: List[str] =grad_fn(state.params ) lowerCamelCase__: Optional[Any] =jax.lax.pmean({"loss": loss} , axis_name="batch" ) lowerCamelCase__: List[str] =jax.lax.pmean(__a , "batch" ) lowerCamelCase__: List[str] =state.apply_gradients(grads=__a ) return state, metrics, new_drp_rng @partial(jax.pmap , axis_name="batch" ) def lowerCAmelCase_ ( __a , **__a ) -> List[Any]: """simple docstring""" lowerCamelCase__: int =model_inputs.pop("start_labels" ) lowerCamelCase__: List[str] =model_inputs.pop("end_labels" ) lowerCamelCase__: int =model_inputs.pop("pooled_labels" ) lowerCamelCase__: Optional[Any] =state.apply_fn(**__a , params=state.params , train=__a ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: List[str] =outputs lowerCamelCase__: Optional[int] =state.loss_fn(__a , __a , __a , __a , __a , __a ) lowerCamelCase__: Optional[Any] =jax.lax.pmean({"loss": loss} , axis_name="batch" ) return metrics class _SCREAMING_SNAKE_CASE ( train_state.TrainState ): '''simple docstring''' lowercase_ = struct.field(pytree_node=__SCREAMING_SNAKE_CASE ) @dataclass class _SCREAMING_SNAKE_CASE : '''simple docstring''' lowercase_ = 42 lowercase_ = 42 lowercase_ = 42 lowercase_ = 42 lowercase_ = 42 lowercase_ = 42 lowercase_ = None def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int=None) ->Optional[int]: '''simple docstring''' lowerCamelCase__: Dict =model.params lowerCamelCase__: Tuple =TrainState.create( apply_fn=model.__call__ , params=UpperCAmelCase_ , tx=UpperCAmelCase_ , loss_fn=UpperCAmelCase_ , ) if ckpt_dir is not None: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Any =restore_checkpoint(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: Tuple ={ "lr": args.lr, "init_lr": args.init_lr, "warmup_steps": args.warmup_steps, "num_train_steps": num_train_steps, "weight_decay": args.weight_decay, } lowerCamelCase__ , lowerCamelCase__: List[Any] =build_tx(**UpperCAmelCase_) lowerCamelCase__: str =train_state.TrainState( step=UpperCAmelCase_ , apply_fn=model.__call__ , params=UpperCAmelCase_ , tx=UpperCAmelCase_ , opt_state=UpperCAmelCase_ , ) lowerCamelCase__: Tuple =args lowerCamelCase__: Tuple =data_collator lowerCamelCase__: str =lr lowerCamelCase__: Dict =params lowerCamelCase__: List[str] =jax_utils.replicate(UpperCAmelCase_) return state def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple) ->Optional[Any]: '''simple docstring''' lowerCamelCase__: Tuple =self.args lowerCamelCase__: Any =len(UpperCAmelCase_) // args.batch_size lowerCamelCase__: List[str] =jax.random.PRNGKey(0) lowerCamelCase__: Optional[Any] =jax.random.split(UpperCAmelCase_ , jax.device_count()) for epoch in range(args.max_epochs): lowerCamelCase__: Union[str, Any] =jnp.array(0 , dtype=jnp.floataa) lowerCamelCase__: str =get_batched_dataset(UpperCAmelCase_ , args.batch_size , seed=UpperCAmelCase_) lowerCamelCase__: Dict =0 for batch in tqdm(UpperCAmelCase_ , total=UpperCAmelCase_ , desc=F"""Running EPOCH-{epoch}"""): lowerCamelCase__: List[str] =self.data_collator(UpperCAmelCase_) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[int] =self.train_step_fn(UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_) running_loss += jax_utils.unreplicate(metrics["loss"]) i += 1 if i % args.logging_steps == 0: lowerCamelCase__: Optional[int] =jax_utils.unreplicate(state.step) lowerCamelCase__: List[Any] =running_loss.item() / i lowerCamelCase__: Tuple =self.scheduler_fn(state_step - 1) lowerCamelCase__: Union[str, Any] =self.evaluate(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: Dict ={ "step": state_step.item(), "eval_loss": eval_loss.item(), "tr_loss": tr_loss, "lr": lr.item(), } tqdm.write(str(UpperCAmelCase_)) self.logger.log(UpperCAmelCase_ , commit=UpperCAmelCase_) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + F"""-e{epoch}-s{i}""" , state=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : str) ->Any: '''simple docstring''' lowerCamelCase__: List[Any] =get_batched_dataset(UpperCAmelCase_ , self.args.batch_size) lowerCamelCase__: List[str] =len(UpperCAmelCase_) // self.args.batch_size lowerCamelCase__: str =jnp.array(0 , dtype=jnp.floataa) lowerCamelCase__: Optional[Any] =0 for batch in tqdm(UpperCAmelCase_ , total=UpperCAmelCase_ , desc="Evaluating ... "): lowerCamelCase__: int =self.data_collator(UpperCAmelCase_) lowerCamelCase__: str =self.val_step_fn(UpperCAmelCase_ , **UpperCAmelCase_) running_loss += jax_utils.unreplicate(metrics["loss"]) i += 1 return running_loss / i def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int]) ->int: '''simple docstring''' lowerCamelCase__: Any =jax_utils.unreplicate(UpperCAmelCase_) print(F"""SAVING CHECKPOINT IN {save_dir}""" , end=" ... ") self.model_save_fn(UpperCAmelCase_ , params=state.params) with open(os.path.join(UpperCAmelCase_ , "opt_state.msgpack") , "wb") as f: f.write(to_bytes(state.opt_state)) joblib.dump(self.args , os.path.join(UpperCAmelCase_ , "args.joblib")) joblib.dump(self.data_collator , os.path.join(UpperCAmelCase_ , "data_collator.joblib")) with open(os.path.join(UpperCAmelCase_ , "training_state.json") , "w") as f: json.dump({"step": state.step.item()} , UpperCAmelCase_) print("DONE") def lowerCAmelCase_ ( __a , __a ) -> str: """simple docstring""" print(F"""RESTORING CHECKPOINT FROM {save_dir}""" , end=" ... " ) with open(os.path.join(__a , "flax_model.msgpack" ) , "rb" ) as f: lowerCamelCase__: Tuple =from_bytes(state.params , f.read() ) with open(os.path.join(__a , "opt_state.msgpack" ) , "rb" ) as f: lowerCamelCase__: Optional[int] =from_bytes(state.opt_state , f.read() ) lowerCamelCase__: Any =joblib.load(os.path.join(__a , "args.joblib" ) ) lowerCamelCase__: Union[str, Any] =joblib.load(os.path.join(__a , "data_collator.joblib" ) ) with open(os.path.join(__a , "training_state.json" ) , "r" ) as f: lowerCamelCase__: Optional[Any] =json.load(__a ) lowerCamelCase__: Any =training_state["step"] print("DONE" ) return params, opt_state, step, args, data_collator def lowerCAmelCase_ ( __a , __a , __a , __a ) -> Optional[int]: """simple docstring""" lowerCamelCase__: int =num_train_steps - warmup_steps lowerCamelCase__: str =optax.linear_schedule(init_value=__a , end_value=__a , transition_steps=__a ) lowerCamelCase__: Optional[Any] =optax.linear_schedule(init_value=__a , end_value=1e-7 , transition_steps=__a ) lowerCamelCase__: List[Any] =optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] ) return lr def lowerCAmelCase_ ( __a , __a , __a , __a , __a ) -> str: """simple docstring""" def weight_decay_mask(__a ): lowerCamelCase__: List[str] =traverse_util.flatten_dict(__a ) lowerCamelCase__: List[str] ={k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()} return traverse_util.unflatten_dict(__a ) lowerCamelCase__: Optional[Any] =scheduler_fn(__a , __a , __a , __a ) lowerCamelCase__: Tuple =optax.adamw(learning_rate=__a , weight_decay=__a , mask=__a ) return tx, lr
59
0
"""simple docstring""" import importlib import os import sys # This is required to make the module import works (when the python process is running from the root of the repo) sys.path.append('''.''') def UpperCAmelCase__ (snake_case__ : Optional[int] ): """simple docstring""" _snake_case : Dict = test_file.split(os.path.sep ) if components[0:2] != ["tests", "models"]: raise ValueError( """`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got """ F"{test_file} instead." ) _snake_case : List[str] = components[-1] if not test_fn.endswith("""py""" ): raise ValueError(F"`test_file` should be a python file. Got {test_fn} instead." ) if not test_fn.startswith("""test_modeling_""" ): raise ValueError( F"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead." ) _snake_case : Dict = components[:-1] + [test_fn.replace(""".py""" , """""" )] _snake_case : Dict = ".".join(__a ) return test_module_path def UpperCAmelCase__ (snake_case__ : List[str] ): """simple docstring""" _snake_case : List[Any] = get_module_path(__a ) _snake_case : Any = importlib.import_module(__a ) return test_module def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" _snake_case : Any = [] _snake_case : Optional[Any] = get_test_module(__a ) for attr in dir(__a ): if attr.endswith("""ModelTester""" ): tester_classes.append(getattr(__a , __a ) ) # sort with class names return sorted(__a , key=lambda snake_case__ : x.__name__ ) def UpperCAmelCase__ (snake_case__ : Optional[int] ): """simple docstring""" _snake_case : Any = [] _snake_case : Any = get_test_module(__a ) for attr in dir(__a ): _snake_case : List[str] = getattr(__a , __a ) # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking # `all_model_classes` is not empty (which also excludes other special classes). _snake_case : Optional[Any] = getattr(__a , """all_model_classes""" , [] ) if len(__a ) > 0: test_classes.append(__a ) # sort with class names return sorted(__a , key=lambda snake_case__ : x.__name__ ) def UpperCAmelCase__ (snake_case__ : str ): """simple docstring""" _snake_case : Tuple = get_test_classes(__a ) _snake_case : Any = set() for test_class in test_classes: model_classes.update(test_class.all_model_classes ) # sort with class names return sorted(__a , key=lambda snake_case__ : x.__name__ ) def UpperCAmelCase__ (snake_case__ : Tuple ): """simple docstring""" _snake_case : int = test_class() if hasattr(__a , """setUp""" ): test.setUp() _snake_case : List[str] = None if hasattr(__a , """model_tester""" ): # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case. if test.model_tester is not None: _snake_case : Union[str, Any] = test.model_tester.__class__ return model_tester def UpperCAmelCase__ (snake_case__ : str , snake_case__ : int ): """simple docstring""" _snake_case : Optional[Any] = get_test_classes(__a ) _snake_case : List[Any] = [] for test_class in test_classes: if model_class in test_class.all_model_classes: target_test_classes.append(__a ) # sort with class names return sorted(__a , key=lambda snake_case__ : x.__name__ ) def UpperCAmelCase__ (snake_case__ : int , snake_case__ : Any ): """simple docstring""" _snake_case : Any = get_test_classes_for_model(__a , __a ) _snake_case : Optional[int] = [] for test_class in test_classes: _snake_case : Optional[Any] = get_model_tester_from_test_class(__a ) if tester_class is not None: tester_classes.append(__a ) # sort with class names return sorted(__a , key=lambda snake_case__ : x.__name__ ) def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" _snake_case : Optional[int] = get_test_classes(__a ) _snake_case : Any = {test_class: get_model_tester_from_test_class(__a ) for test_class in test_classes} return test_tester_mapping def UpperCAmelCase__ (snake_case__ : List[Any] ): """simple docstring""" _snake_case : List[str] = get_model_classes(__a ) _snake_case : int = { model_class: get_test_classes_for_model(__a , __a ) for model_class in model_classes } return model_test_mapping def UpperCAmelCase__ (snake_case__ : Dict ): """simple docstring""" _snake_case : int = get_model_classes(__a ) _snake_case : List[str] = { model_class: get_tester_classes_for_model(__a , __a ) for model_class in model_classes } return model_to_tester_mapping def UpperCAmelCase__ (snake_case__ : List[Any] ): """simple docstring""" if isinstance(__a , __a ): return o elif isinstance(__a , __a ): return o.__name__ elif isinstance(__a , (list, tuple) ): return [to_json(__a ) for x in o] elif isinstance(__a , __a ): return {to_json(__a ): to_json(__a ) for k, v in o.items()} else: return o
609
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = ["image_processor", "tokenizer"] lowercase_ = "ChineseCLIPImageProcessor" lowercase_ = ("BertTokenizer", "BertTokenizerFast") def __init__(self : Any , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Union[str, Any]=None , **UpperCAmelCase_ : str) ->Dict: '''simple docstring''' lowerCamelCase__: str =None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , UpperCAmelCase_ , ) lowerCamelCase__: Tuple =kwargs.pop("feature_extractor") lowerCamelCase__: Optional[int] =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") super().__init__(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: Optional[int] =self.image_processor def __call__(self : int , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : List[Any]=None , **UpperCAmelCase_ : Dict) ->Optional[int]: '''simple docstring''' if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be none.") if text is not None: lowerCamelCase__: Dict =self.tokenizer(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_) if images is not None: lowerCamelCase__: List[str] =self.image_processor(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_) if text is not None and images is not None: lowerCamelCase__: Union[str, Any] =image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCAmelCase_) , tensor_type=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int) ->str: '''simple docstring''' return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Any , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[Any]) ->Dict: '''simple docstring''' return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_) @property def SCREAMING_SNAKE_CASE_ (self : int) ->List[str]: '''simple docstring''' lowerCamelCase__: str =self.tokenizer.model_input_names lowerCamelCase__: Union[str, Any] =self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) @property def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->str: '''simple docstring''' warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCAmelCase_ , ) return self.image_processor_class
59
0
def _snake_case ( lowerCAmelCase : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : set[int] = set() # To detect a back edge, keep track of vertices currently in the recursion stack SCREAMING_SNAKE_CASE_ : set[int] = set() return any( node not in visited and depth_first_search(__a , __a , __a , __a ) for node in graph ) def _snake_case ( lowerCAmelCase : List[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Tuple , lowerCAmelCase : Dict ): """simple docstring""" visited.add(__a ) rec_stk.add(__a ) for node in graph[vertex]: if node not in visited: if depth_first_search(__a , __a , __a , __a ): return True elif node in rec_stk: return True # The node needs to be removed from recursion stack before function ends rec_stk.remove(__a ) return False if __name__ == "__main__": from doctest import testmod testmod()
216
from math import ceil, sqrt def lowerCAmelCase_ ( __a = 1000000 ) -> int: """simple docstring""" lowerCamelCase__: Any =0 for outer_width in range(3 , (limit // 4) + 2 ): if outer_width**2 > limit: lowerCamelCase__: Optional[int] =max(ceil(sqrt(outer_width**2 - limit ) ) , 1 ) else: lowerCamelCase__: Tuple =1 if (outer_width - hole_width_lower_bound) % 2: hole_width_lower_bound += 1 answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1 return answer if __name__ == "__main__": print(f'{solution() = }')
59
0
"""simple docstring""" import re from filelock import FileLock try: import nltk A = True except (ImportError, ModuleNotFoundError): A = False if NLTK_AVAILABLE: with FileLock(""".lock""") as lock: nltk.download("""punkt""", quiet=True) def _UpperCamelCase ( UpperCamelCase ) -> str: """simple docstring""" re.sub("<n>" , "" , __a ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(__a ) )
77
def lowerCAmelCase_ ( __a = 50000000 ) -> int: """simple docstring""" lowerCamelCase__: Any =set() lowerCamelCase__: int =int((limit - 24) ** (1 / 2) ) lowerCamelCase__: Tuple =set(range(3 , prime_square_limit + 1 , 2 ) ) primes.add(2 ) for p in range(3 , prime_square_limit + 1 , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , prime_square_limit + 1 , __a ) ) ) for primea in primes: lowerCamelCase__: Optional[int] =primea * primea for primea in primes: lowerCamelCase__: List[str] =primea * primea * primea if square + cube >= limit - 16: break for primea in primes: lowerCamelCase__: int =primea * primea * primea * primea lowerCamelCase__: Optional[Any] =square + cube + tetr if total >= limit: break ret.add(__a ) return len(__a ) if __name__ == "__main__": print(f'{solution() = }')
59
0
"""simple docstring""" import math from numpy import inf from scipy.integrate import quad def _snake_case ( lowercase__ ): if num <= 0: raise ValueError('math domain error' ) return quad(__a , 0 , __a , args=(__a) )[0] def _snake_case ( lowercase__ , lowercase__ ): return math.pow(__a , z - 1 ) * math.exp(-x ) if __name__ == "__main__": from doctest import testmod testmod()
630
from __future__ import annotations from decimal import Decimal from math import * # noqa: F403 from sympy import diff def lowerCAmelCase_ ( __a , __a , __a = 10**-10 ) -> float: """simple docstring""" lowerCamelCase__: List[str] =a while True: lowerCamelCase__: Optional[Any] =Decimal(__a ) - ( Decimal(eval(__a ) ) / Decimal(eval(str(diff(__a ) ) ) ) # noqa: S307 ) # This number dictates the accuracy of the answer if abs(eval(__a ) ) < precision: # noqa: S307 return float(__a ) # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}') # Find root of polynomial print(f'The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}') # Find Square Root of 5 print(f'The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}') # Exponential Roots print(f'The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}')
59
0
from __future__ import annotations import typing from collections import Counter def lowerCamelCase ( a_ ) -> typing.Counter[int]: lowerCAmelCase_ = Counter() for base in range(1 , max_perimeter + 1 ): for perpendicular in range(__a , max_perimeter + 1 ): lowerCAmelCase_ = (base * base + perpendicular * perpendicular) ** 0.5 if hypotenuse == int(__a ): lowerCAmelCase_ = int(base + perpendicular + hypotenuse ) if perimeter > max_perimeter: continue triplets[perimeter] += 1 return triplets def lowerCamelCase ( a_ = 1_000 ) -> int: lowerCAmelCase_ = pythagorean_triple(__a ) return triplets.most_common(1 )[0][0] if __name__ == "__main__": print(f'''Perimeter {solution()} has maximum solutions''')
318
import numpy as np from numpy import ndarray from scipy.optimize import Bounds, LinearConstraint, minimize def lowerCAmelCase_ ( __a ) -> float: """simple docstring""" return np.dot(__a , __a ) class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__(self : List[str] , *, UpperCAmelCase_ : float = np.inf , UpperCAmelCase_ : str = "linear" , UpperCAmelCase_ : float = 0.0 , ) ->None: '''simple docstring''' lowerCamelCase__: Dict =regularization lowerCamelCase__: Any =gamma if kernel == "linear": lowerCamelCase__: Dict =self.__linear elif kernel == "rbf": if self.gamma == 0: raise ValueError("rbf kernel requires gamma") if not isinstance(self.gamma , (float, int)): raise ValueError("gamma must be float or int") if not self.gamma > 0: raise ValueError("gamma must be > 0") lowerCamelCase__: Tuple =self.__rbf # in the future, there could be a default value like in sklearn # sklear: def_gamma = 1/(n_features * X.var()) (wiki) # previously it was 1/(n_features) else: lowerCamelCase__: Optional[Any] =F"""Unknown kernel: {kernel}""" raise ValueError(UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : ndarray , UpperCAmelCase_ : ndarray) ->float: '''simple docstring''' return np.dot(UpperCAmelCase_ , UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : ndarray , UpperCAmelCase_ : ndarray) ->float: '''simple docstring''' return np.exp(-(self.gamma * norm_squared(vectora - vectora))) def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : list[ndarray] , UpperCAmelCase_ : ndarray) ->None: '''simple docstring''' lowerCamelCase__: Optional[Any] =observations lowerCamelCase__: Optional[int] =classes # using Wolfe's Dual to calculate w. # Primal problem: minimize 1/2*norm_squared(w) # constraint: yn(w . xn + b) >= 1 # # With l a vector # Dual problem: maximize sum_n(ln) - # 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm)) # constraint: self.C >= ln >= 0 # and sum_n(ln*yn) = 0 # Then we get w using w = sum_n(ln*yn*xn) # At the end we can get b ~= mean(yn - w . xn) # # Since we use kernels, we only need l_star to calculate b # and to classify observations ((lowerCamelCase__) , ): List[str] =np.shape(UpperCAmelCase_) def to_minimize(UpperCAmelCase_ : ndarray) -> float: lowerCamelCase__: int =0 ((lowerCamelCase__) , ): Optional[Any] =np.shape(UpperCAmelCase_) for i in range(UpperCAmelCase_): for j in range(UpperCAmelCase_): s += ( candidate[i] * candidate[j] * classes[i] * classes[j] * self.kernel(observations[i] , observations[j]) ) return 1 / 2 * s - sum(UpperCAmelCase_) lowerCamelCase__: List[Any] =LinearConstraint(UpperCAmelCase_ , 0 , 0) lowerCamelCase__: str =Bounds(0 , self.regularization) lowerCamelCase__: Union[str, Any] =minimize( UpperCAmelCase_ , np.ones(UpperCAmelCase_) , bounds=UpperCAmelCase_ , constraints=[ly_contraint]).x lowerCamelCase__: str =l_star # calculating mean offset of separation plane to points lowerCamelCase__: Tuple =0 for i in range(UpperCAmelCase_): for j in range(UpperCAmelCase_): s += classes[i] - classes[i] * self.optimum[i] * self.kernel( observations[i] , observations[j]) lowerCamelCase__: int =s / n def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : ndarray) ->int: '''simple docstring''' lowerCamelCase__: Optional[Any] =sum( self.optimum[n] * self.classes[n] * self.kernel(self.observations[n] , UpperCAmelCase_) for n in range(len(self.classes))) return 1 if s + self.offset >= 0 else -1 if __name__ == "__main__": import doctest doctest.testmod()
59
0
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, ) else: from .modeling_text_unet import UNetFlatConditionModel from .pipeline_versatile_diffusion import VersatileDiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
499
import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy __A = logging.getLogger(__name__) def lowerCAmelCase_ ( __a , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = False , ) -> str: """simple docstring""" lowerCamelCase__: int =bnb_quantization_config.load_in_abit lowerCamelCase__: Any =bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( "You have a version of `bitsandbytes` that is not compatible with 8bit quantization," " make sure you have the latest version of `bitsandbytes` installed." ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( "You have a version of `bitsandbytes` that is not compatible with 4bit quantization," "make sure you have the latest version of `bitsandbytes` installed." ) lowerCamelCase__: List[Any] =[] # custom device map if isinstance(__a , __a ) and len(device_map.keys() ) > 1: lowerCamelCase__: Optional[int] =[key for key, value in device_map.items() if value in ["disk", "cpu"]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: lowerCamelCase__: Any =get_keys_to_not_convert(__a ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(__a ) lowerCamelCase__: List[str] =bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: lowerCamelCase__: List[Any] =[] lowerCamelCase__: int =bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(__a ) # compatibility with peft lowerCamelCase__: List[str] =load_in_abit lowerCamelCase__: int =load_in_abit lowerCamelCase__: Tuple =get_parameter_device(__a ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( "It is not recommended to quantize a loaded model. " "The model should be instantiated under the `init_empty_weights` context manager." ) lowerCamelCase__: Tuple =replace_with_bnb_layers(__a , __a , modules_to_not_convert=__a ) # convert param to the right dtype lowerCamelCase__: Dict =bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: lowerCamelCase__: str =name.replace(".weight" , "" ).replace(".bias" , "" ) lowerCamelCase__: Optional[Any] =getattr(__a , __a , __a ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(__a ): param.to(__a ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError("No GPU found. A GPU is needed for quantization." ) logger.info( F"""The model device type is {model_device.type}. However, cuda is needed for quantization.""" "We move the model to cuda." ) return model elif weights_location is None: raise RuntimeError( F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ ) else: with init_empty_weights(): lowerCamelCase__: str =replace_with_bnb_layers( __a , __a , modules_to_not_convert=__a ) lowerCamelCase__: Optional[Any] =get_quantized_model_device_map( __a , __a , __a , max_memory=__a , no_split_module_classes=__a , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): lowerCamelCase__: Any =True lowerCamelCase__: List[str] =any(x in list(device_map.values() ) for x in ["cpu", "disk"] ) load_checkpoint_in_model( __a , __a , __a , dtype=bnb_quantization_config.torch_dtype , offload_folder=__a , offload_state_dict=__a , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(__a , device_map=__a , offload_dir=__a ) def lowerCAmelCase_ ( __a , __a , __a=None , __a=None , __a=None ) -> str: """simple docstring""" if device_map is None: if torch.cuda.is_available(): lowerCamelCase__: str ={"": torch.cuda.current_device()} else: raise RuntimeError("No GPU found. A GPU is needed for quantization." ) logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`." ) if isinstance(__a , __a ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or " "'sequential'." ) lowerCamelCase__: Optional[int] ={} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) lowerCamelCase__: Optional[Any] ={} lowerCamelCase__: str =special_dtypes lowerCamelCase__: List[str] =no_split_module_classes lowerCamelCase__: Dict =bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": lowerCamelCase__: Optional[Any] =get_balanced_memory( __a , low_zero=(device_map == "balanced_low_0") , max_memory=__a , **__a , ) lowerCamelCase__: Union[str, Any] =max_memory lowerCamelCase__: Dict =infer_auto_device_map(__a , **__a ) if isinstance(__a , __a ): # check if don't have any quantized module on the cpu lowerCamelCase__: Union[str, Any] =bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules lowerCamelCase__: List[Any] ={ key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( "\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n " ) else: logger.info( "Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit" ) del device_map_without_some_modules return device_map def lowerCAmelCase_ ( __a , __a , __a=None , __a=None ) -> Optional[Any]: """simple docstring""" if modules_to_not_convert is None: lowerCamelCase__: List[Any] =[] lowerCamelCase__ , lowerCamelCase__: Any =_replace_with_bnb_layers( __a , __a , __a , __a ) if not has_been_replaced: logger.warning( "You are loading your model in 8bit or 4bit but no linear modules were found in your model." " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers." " Please double check your model architecture, or submit an issue on github if you think this is" " a bug." ) return model def lowerCAmelCase_ ( __a , __a , __a=None , __a=None , ) -> List[Any]: """simple docstring""" lowerCamelCase__: Optional[int] =False for name, module in model.named_children(): if current_key_name is None: lowerCamelCase__: Optional[Any] =[] current_key_name.append(__a ) if isinstance(__a , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` lowerCamelCase__: List[str] =".".join(__a ) lowerCamelCase__: Optional[Any] =True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: lowerCamelCase__: int =False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: lowerCamelCase__: Optional[int] =bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=__a , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: lowerCamelCase__: Dict =bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError("load_in_8bit and load_in_4bit can't be both False" ) lowerCamelCase__: Dict =module.weight.data if module.bias is not None: lowerCamelCase__: List[Any] =module.bias.data bnb_module.requires_grad_(__a ) setattr(__a , __a , __a ) lowerCamelCase__: int =True if len(list(module.children() ) ) > 0: lowerCamelCase__ , lowerCamelCase__: List[str] =_replace_with_bnb_layers( __a , __a , __a , __a ) lowerCamelCase__: Union[str, Any] =has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def lowerCAmelCase_ ( __a ) -> List[Any]: """simple docstring""" with init_empty_weights(): lowerCamelCase__: Any =deepcopy(__a ) # this has 0 cost since it is done inside `init_empty_weights` context manager` lowerCamelCase__: str =find_tied_parameters(__a ) # For compatibility with Accelerate < 0.18 if isinstance(__a , __a ): lowerCamelCase__: int =sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: lowerCamelCase__: str =sum(__a , [] ) lowerCamelCase__: str =len(__a ) > 0 # Check if it is a base model lowerCamelCase__: Optional[Any] =False if hasattr(__a , "base_model_prefix" ): lowerCamelCase__: Union[str, Any] =not hasattr(__a , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head lowerCamelCase__: Optional[int] =list(model.named_children() ) lowerCamelCase__: Optional[int] =[list_modules[-1][0]] # add last module together with tied weights lowerCamelCase__: Union[str, Any] =set(__a ) - set(__a ) lowerCamelCase__: List[str] =list(set(__a ) ) + list(__a ) # remove ".weight" from the keys lowerCamelCase__: List[Any] =[".weight", ".bias"] lowerCamelCase__: Tuple =[] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: lowerCamelCase__: Optional[Any] =name.replace(__a , "" ) filtered_module_names.append(__a ) return filtered_module_names def lowerCAmelCase_ ( __a ) -> Tuple: """simple docstring""" for m in model.modules(): if isinstance(__a , bnb.nn.Linearabit ): return True return False def lowerCAmelCase_ ( __a ) -> List[str]: """simple docstring""" return next(parameter.parameters() ).device def lowerCAmelCase_ ( __a , __a , __a , __a , __a , __a , __a ) -> Any: """simple docstring""" if fpaa_statistics is None: set_module_tensor_to_device(__a , __a , 0 , dtype=__a , value=__a ) lowerCamelCase__: Dict =param_name lowerCamelCase__: Tuple =model if "." in tensor_name: lowerCamelCase__: Any =tensor_name.split("." ) for split in splits[:-1]: lowerCamelCase__: Any =getattr(__a , __a ) if new_module is None: raise ValueError(F"""{module} has no attribute {split}.""" ) lowerCamelCase__: str =new_module lowerCamelCase__: int =splits[-1] # offload weights lowerCamelCase__: str =False offload_weight(module._parameters[tensor_name] , __a , __a , index=__a ) if hasattr(module._parameters[tensor_name] , "SCB" ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace("weight" , "SCB" ) , __a , index=__a , ) else: offload_weight(__a , __a , __a , index=__a ) offload_weight(__a , param_name.replace("weight" , "SCB" ) , __a , index=__a ) set_module_tensor_to_device(__a , __a , "meta" , dtype=__a , value=torch.empty(*param.size() ) )
59
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __UpperCamelCase : int = {'''configuration_fnet''': ['''FNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FNetConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Any = ['''FNetTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Tuple = ['''FNetTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Optional[Any] = [ '''FNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FNetForMaskedLM''', '''FNetForMultipleChoice''', '''FNetForNextSentencePrediction''', '''FNetForPreTraining''', '''FNetForQuestionAnswering''', '''FNetForSequenceClassification''', '''FNetForTokenClassification''', '''FNetLayer''', '''FNetModel''', '''FNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys __UpperCamelCase : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
4
from __future__ import annotations from math import pi def lowerCAmelCase_ ( __a , __a , __a ) -> dict[str, float]: """simple docstring""" if (inductance, frequency, reactance).count(0 ) != 1: raise ValueError("One and only one argument must be 0" ) if inductance < 0: raise ValueError("Inductance cannot be negative" ) if frequency < 0: raise ValueError("Frequency cannot be negative" ) if reactance < 0: raise ValueError("Inductive reactance cannot be negative" ) if inductance == 0: return {"inductance": reactance / (2 * pi * frequency)} elif frequency == 0: return {"frequency": reactance / (2 * pi * inductance)} elif reactance == 0: return {"reactance": 2 * pi * frequency * inductance} else: raise ValueError("Exactly one argument must be 0" ) if __name__ == "__main__": import doctest doctest.testmod()
59
0
from __future__ import annotations def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): if len(__a ) == 0: raise ValueError("find_max() arg is an empty sequence" ) if ( left >= len(__a ) or left < -len(__a ) or right >= len(__a ) or right < -len(__a ) ): raise IndexError("list index out of range" ) if left == right: return nums[left] lowerCamelCase_ = (left + right) >> 1 # the middle lowerCamelCase_ = find_max(__a , __a , __a ) # find max in range[left, mid] lowerCamelCase_ = find_max(__a , mid + 1 , __a ) # find max in range[mid + 1, right] return left_max if left_max >= right_max else right_max if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
463
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCAmelCase_ ( __a , __a ) -> List[Any]: """simple docstring""" assert isinstance(__a , __a ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowerCAmelCase_ ( __a , __a , __a ) -> Any: """simple docstring""" lowerCamelCase__: Any =tmp_path / "cache" lowerCamelCase__: Optional[int] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCamelCase__: Tuple =ParquetDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read() _check_parquet_dataset(__a , __a ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowerCAmelCase_ ( __a , __a , __a ) -> List[str]: """simple docstring""" lowerCamelCase__: int =tmp_path / "cache" lowerCamelCase__: Tuple ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: Union[str, Any] =features.copy() if features else default_expected_features lowerCamelCase__: Optional[int] =( Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCamelCase__: int =ParquetDatasetReader(__a , features=__a , cache_dir=__a ).read() _check_parquet_dataset(__a , __a ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowerCAmelCase_ ( __a , __a , __a ) -> Any: """simple docstring""" lowerCamelCase__: Any =tmp_path / "cache" lowerCamelCase__: Optional[Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: Optional[Any] =ParquetDatasetReader(__a , cache_dir=__a , split=__a ).read() _check_parquet_dataset(__a , __a ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list] ) def lowerCAmelCase_ ( __a , __a , __a ) -> int: """simple docstring""" if issubclass(__a , __a ): lowerCamelCase__: List[Any] =parquet_path elif issubclass(__a , __a ): lowerCamelCase__: str =[parquet_path] lowerCamelCase__: Tuple =tmp_path / "cache" lowerCamelCase__: Optional[Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: int =ParquetDatasetReader(__a , cache_dir=__a ).read() _check_parquet_dataset(__a , __a ) def lowerCAmelCase_ ( __a , __a , __a=("train",) ) -> Dict: """simple docstring""" assert isinstance(__a , __a ) for split in splits: lowerCamelCase__: Tuple =dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowerCAmelCase_ ( __a , __a , __a ) -> Any: """simple docstring""" lowerCamelCase__: List[Any] =tmp_path / "cache" lowerCamelCase__: Optional[Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCamelCase__: Tuple =ParquetDatasetReader( {"train": parquet_path} , cache_dir=__a , keep_in_memory=__a ).read() _check_parquet_datasetdict(__a , __a ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowerCAmelCase_ ( __a , __a , __a ) -> Optional[Any]: """simple docstring""" lowerCamelCase__: Tuple =tmp_path / "cache" lowerCamelCase__: Optional[int] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: List[Any] =features.copy() if features else default_expected_features lowerCamelCase__: int =( Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCamelCase__: Optional[Any] =ParquetDatasetReader({"train": parquet_path} , features=__a , cache_dir=__a ).read() _check_parquet_datasetdict(__a , __a ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowerCAmelCase_ ( __a , __a , __a ) -> Union[str, Any]: """simple docstring""" if split: lowerCamelCase__: Any ={split: parquet_path} else: lowerCamelCase__: int ="train" lowerCamelCase__: Any ={"train": parquet_path, "test": parquet_path} lowerCamelCase__: str =tmp_path / "cache" lowerCamelCase__: Any ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: int =ParquetDatasetReader(__a , cache_dir=__a ).read() _check_parquet_datasetdict(__a , __a , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCAmelCase_ ( __a , __a ) -> int: """simple docstring""" lowerCamelCase__: List[str] =ParquetDatasetWriter(__a , tmp_path / "foo.parquet" ) assert writer.write() > 0 lowerCamelCase__: List[str] =pq.ParquetFile(tmp_path / "foo.parquet" ) lowerCamelCase__: List[str] =pf.read() assert dataset.data.table == output_table def lowerCAmelCase_ ( __a , __a ) -> List[str]: """simple docstring""" lowerCamelCase__: List[str] =str(shared_datadir / "test_image_rgb.jpg" ) lowerCamelCase__: Union[str, Any] ={"image": [image_path]} lowerCamelCase__: Optional[Any] =Features({"image": Image()} ) lowerCamelCase__: Optional[int] =Dataset.from_dict(__a , features=__a ) lowerCamelCase__: Optional[int] =ParquetDatasetWriter(__a , tmp_path / "foo.parquet" ) assert writer.write() > 0 lowerCamelCase__: Dict =Dataset.from_parquet(str(tmp_path / "foo.parquet" ) ) assert dataset.features == reloaded_dataset.features lowerCamelCase__: Optional[Any] =ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=__a ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( "feature, expected" , [ (Features({"foo": Value("int32" )} ), None), (Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def lowerCAmelCase_ ( __a , __a ) -> Optional[Any]: """simple docstring""" assert get_writer_batch_size(__a ) == expected
59
0
"""simple docstring""" import sys import turtle def __snake_case ( __A ,__A ) -> tuple[float, float]: return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2 def __snake_case ( __A ,__A ,__A ,__A ,) -> None: my_pen.up() my_pen.goto(vertexa[0] ,vertexa[1] ) my_pen.down() my_pen.goto(vertexa[0] ,vertexa[1] ) my_pen.goto(vertexa[0] ,vertexa[1] ) my_pen.goto(vertexa[0] ,vertexa[1] ) if depth == 0: return triangle(__a ,get_mid(__a ,__a ) ,get_mid(__a ,__a ) ,depth - 1 ) triangle(__a ,get_mid(__a ,__a ) ,get_mid(__a ,__a ) ,depth - 1 ) triangle(__a ,get_mid(__a ,__a ) ,get_mid(__a ,__a ) ,depth - 1 ) if __name__ == "__main__": if len(sys.argv) != 2: raise ValueError( "Correct format for using this script: " "python fractals.py <int:depth_for_fractal>" ) lowerCAmelCase: Union[str, Any] =turtle.Turtle() my_pen.ht() my_pen.speed(5) my_pen.pencolor("red") lowerCAmelCase: Dict =[(-175, -125), (0, 175), (175, -125)] # vertices of triangle triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
607
import unittest from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __A = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' lowercase_ = XLMProphetNetTokenizer lowercase_ = False lowercase_ = True def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Optional[Any]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase__: Any =XLMProphetNetTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_) tokenizer.save_pretrained(self.tmpdirname) def SCREAMING_SNAKE_CASE_ (self : str) ->str: '''simple docstring''' lowerCamelCase__: List[Any] ="[PAD]" lowerCamelCase__: Tuple =0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_) , UpperCAmelCase_) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_) , UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Dict) ->int: '''simple docstring''' lowerCamelCase__: List[Any] =list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , "[PAD]") self.assertEqual(vocab_keys[1] , "[CLS]") self.assertEqual(vocab_keys[-1] , "j") self.assertEqual(len(UpperCAmelCase_) , 1_012) def SCREAMING_SNAKE_CASE_ (self : Dict) ->Union[str, Any]: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_012) def SCREAMING_SNAKE_CASE_ (self : Any) ->Optional[int]: '''simple docstring''' lowerCamelCase__: Optional[Any] =XLMProphetNetTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_) lowerCamelCase__: Tuple =tokenizer.tokenize("This is a test") self.assertListEqual(UpperCAmelCase_ , ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) lowerCamelCase__: Optional[Any] =tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( UpperCAmelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) lowerCamelCase__: Any =tokenizer.convert_tokens_to_ids(UpperCAmelCase_) self.assertListEqual( UpperCAmelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4] ] , ) lowerCamelCase__: Any =tokenizer.convert_ids_to_tokens(UpperCAmelCase_) self.assertListEqual( UpperCAmelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "[UNK]", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "[UNK]", ".", ] , ) @cached_property def SCREAMING_SNAKE_CASE_ (self : Any) ->int: '''simple docstring''' return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased") @slow def SCREAMING_SNAKE_CASE_ (self : List[str]) ->List[str]: '''simple docstring''' lowerCamelCase__: Optional[int] ="Hello World!" lowerCamelCase__: Dict =[35_389, 6_672, 49, 2] self.assertListEqual(UpperCAmelCase_ , self.big_tokenizer.encode(UpperCAmelCase_)) @slow def SCREAMING_SNAKE_CASE_ (self : int) ->Union[str, Any]: '''simple docstring''' lowerCamelCase__: Any ={"input_ids": [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase_ , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
59
0
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging A_ = logging.get_logger(__name__) A_ = { "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json", # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class snake_case ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' UpperCAmelCase : Any = """sew-d""" def __init__( self : Optional[int] , lowerCAmelCase_ : str=32 , lowerCAmelCase_ : str=768 , lowerCAmelCase_ : Any=12 , lowerCAmelCase_ : Optional[Any]=12 , lowerCAmelCase_ : Optional[Any]=3_072 , lowerCAmelCase_ : Tuple=2 , lowerCAmelCase_ : List[Any]=512 , lowerCAmelCase_ : Union[str, Any]=256 , lowerCAmelCase_ : int=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : int=("p2c", "c2p") , lowerCAmelCase_ : Tuple="layer_norm" , lowerCAmelCase_ : Any="gelu_python" , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : Any=0.1 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : List[Any]=0.0 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : int=0.02 , lowerCAmelCase_ : List[str]=1e-7 , lowerCAmelCase_ : List[Any]=1e-5 , lowerCAmelCase_ : Union[str, Any]="group" , lowerCAmelCase_ : List[Any]="gelu" , lowerCAmelCase_ : Any=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowerCAmelCase_ : str=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowerCAmelCase_ : Optional[Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : Tuple=128 , lowerCAmelCase_ : Optional[int]=16 , lowerCAmelCase_ : int=True , lowerCAmelCase_ : Tuple=0.05 , lowerCAmelCase_ : Union[str, Any]=10 , lowerCAmelCase_ : int=2 , lowerCAmelCase_ : List[Any]=0.0 , lowerCAmelCase_ : Any=10 , lowerCAmelCase_ : Union[str, Any]=0 , lowerCAmelCase_ : List[str]="mean" , lowerCAmelCase_ : Dict=False , lowerCAmelCase_ : Optional[int]=False , lowerCAmelCase_ : Dict=256 , lowerCAmelCase_ : Union[str, Any]=0 , lowerCAmelCase_ : Tuple=1 , lowerCAmelCase_ : int=2 , **lowerCAmelCase_ : Tuple , ) -> str: """simple docstring""" super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE_ = hidden_size SCREAMING_SNAKE_CASE_ = feat_extract_norm SCREAMING_SNAKE_CASE_ = feat_extract_activation SCREAMING_SNAKE_CASE_ = list(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE_ = list(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE_ = list(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE_ = conv_bias SCREAMING_SNAKE_CASE_ = num_conv_pos_embeddings SCREAMING_SNAKE_CASE_ = num_conv_pos_embedding_groups SCREAMING_SNAKE_CASE_ = len(self.conv_dim ) SCREAMING_SNAKE_CASE_ = num_hidden_layers SCREAMING_SNAKE_CASE_ = intermediate_size SCREAMING_SNAKE_CASE_ = squeeze_factor SCREAMING_SNAKE_CASE_ = max_position_embeddings SCREAMING_SNAKE_CASE_ = position_buckets SCREAMING_SNAKE_CASE_ = share_att_key SCREAMING_SNAKE_CASE_ = relative_attention SCREAMING_SNAKE_CASE_ = norm_rel_ebd SCREAMING_SNAKE_CASE_ = list(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = hidden_dropout SCREAMING_SNAKE_CASE_ = attention_dropout SCREAMING_SNAKE_CASE_ = activation_dropout SCREAMING_SNAKE_CASE_ = feat_proj_dropout SCREAMING_SNAKE_CASE_ = final_dropout SCREAMING_SNAKE_CASE_ = layer_norm_eps SCREAMING_SNAKE_CASE_ = feature_layer_norm_eps SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect.''' '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,''' F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)''' F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 SCREAMING_SNAKE_CASE_ = apply_spec_augment SCREAMING_SNAKE_CASE_ = mask_time_prob SCREAMING_SNAKE_CASE_ = mask_time_length SCREAMING_SNAKE_CASE_ = mask_time_min_masks SCREAMING_SNAKE_CASE_ = mask_feature_prob SCREAMING_SNAKE_CASE_ = mask_feature_length SCREAMING_SNAKE_CASE_ = mask_feature_min_masks # ctc loss SCREAMING_SNAKE_CASE_ = ctc_loss_reduction SCREAMING_SNAKE_CASE_ = ctc_zero_infinity # sequence classification SCREAMING_SNAKE_CASE_ = use_weighted_layer_sum SCREAMING_SNAKE_CASE_ = classifier_proj_size @property def _lowercase ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
393
import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE_ (self : List[str]) ->str: '''simple docstring''' lowerCamelCase__: Union[str, Any] ="ylacombe/bark-small" lowerCamelCase__: Tuple =tempfile.mkdtemp() lowerCamelCase__: Tuple ="en_speaker_1" lowerCamelCase__: Optional[int] ="This is a test string" lowerCamelCase__: List[str] ="speaker_embeddings_path.json" lowerCamelCase__: int ="speaker_embeddings" def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , **UpperCAmelCase_ : Any) ->Tuple: '''simple docstring''' return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Union[str, Any]: '''simple docstring''' shutil.rmtree(self.tmpdirname) def SCREAMING_SNAKE_CASE_ (self : int) ->Any: '''simple docstring''' lowerCamelCase__: List[Any] =self.get_tokenizer() lowerCamelCase__: List[str] =BarkProcessor(tokenizer=UpperCAmelCase_) processor.save_pretrained(self.tmpdirname) lowerCamelCase__: Dict =BarkProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab()) @slow def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Tuple: '''simple docstring''' lowerCamelCase__: Tuple =BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) lowerCamelCase__: Dict =self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)") lowerCamelCase__: Any =BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->int: '''simple docstring''' lowerCamelCase__: Any =BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) lowerCamelCase__: List[str] =35 lowerCamelCase__: Optional[Any] =2 lowerCamelCase__: Optional[Any] =8 lowerCamelCase__: Optional[int] ={ "semantic_prompt": np.ones(UpperCAmelCase_), "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)), "fine_prompt": np.ones((nb_codebooks_total, seq_len)), } # test providing already loaded voice_preset lowerCamelCase__: Any =processor(text=self.input_string , voice_preset=UpperCAmelCase_) lowerCamelCase__: int =inputs["history_prompt"] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([])).tolist()) # test loading voice preset from npz file lowerCamelCase__: Union[str, Any] =os.path.join(self.tmpdirname , "file.npz") np.savez(UpperCAmelCase_ , **UpperCAmelCase_) lowerCamelCase__: Tuple =processor(text=self.input_string , voice_preset=UpperCAmelCase_) lowerCamelCase__: Optional[Any] =inputs["history_prompt"] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([])).tolist()) # test loading voice preset from the hub lowerCamelCase__: Any =processor(text=self.input_string , voice_preset=self.voice_preset) def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Union[str, Any]: '''simple docstring''' lowerCamelCase__: str =self.get_tokenizer() lowerCamelCase__: Dict =BarkProcessor(tokenizer=UpperCAmelCase_) lowerCamelCase__: List[Any] =processor(text=self.input_string) lowerCamelCase__: Optional[int] =tokenizer( self.input_string , padding="max_length" , max_length=256 , add_special_tokens=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist())
59
0
'''simple docstring''' import os # Precomputes a list of the 100 first triangular numbers SCREAMING_SNAKE_CASE = [int(0.5 * n * (n + 1)) for n in range(1, 101)] def lowercase_ ( ) -> List[str]: """simple docstring""" lowercase : Optional[Any] =os.path.dirname(os.path.realpath(__a ) ) lowercase : Optional[int] =os.path.join(__a , '''words.txt''' ) lowercase : Dict ="" with open(__a ) as f: lowercase : int =f.readline() lowercase : Any =[word.strip('''\"''' ) for word in words.strip('''\r\n''' ).split(''',''' )] lowercase : Optional[Any] =[ word for word in [sum(ord(__a ) - 6_4 for x in word ) for word in words] if word in TRIANGULAR_NUMBERS ] return len(__a ) if __name__ == "__main__": print(solution())
94
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = ["image_processor", "tokenizer"] lowercase_ = "CLIPImageProcessor" lowercase_ = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast") def __init__(self : List[Any] , UpperCAmelCase_ : int=None , UpperCAmelCase_ : List[Any]=None , **UpperCAmelCase_ : List[str]) ->Optional[int]: '''simple docstring''' lowerCamelCase__: Union[str, Any] =None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , UpperCAmelCase_ , ) lowerCamelCase__: int =kwargs.pop("feature_extractor") lowerCamelCase__: int =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") super().__init__(UpperCAmelCase_ , UpperCAmelCase_) def __call__(self : List[Any] , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : int=None , **UpperCAmelCase_ : Any) ->Union[str, Any]: '''simple docstring''' if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be none.") if text is not None: lowerCamelCase__: List[Any] =self.tokenizer(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_) if images is not None: lowerCamelCase__: int =self.image_processor(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_) if text is not None and images is not None: lowerCamelCase__: str =image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCAmelCase_) , tensor_type=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[str] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[Any]) ->Dict: '''simple docstring''' return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Optional[int] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Any) ->Optional[Any]: '''simple docstring''' return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_) @property def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Optional[int]: '''simple docstring''' lowerCamelCase__: Optional[Any] =self.tokenizer.model_input_names lowerCamelCase__: str =self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
59
0
"""simple docstring""" from __future__ import annotations import requests A_ = set( '''approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'''.split() ) def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : Union[str, Any] = 1 , snake_case__ : int = "new" , snake_case__ : str = None ): """simple docstring""" _snake_case : Optional[int] = wanted_data or [] if invalid_search_terms := ", ".join(sorted(set(__a ) - valid_terms ) ): _snake_case : int = F"Invalid search term: {invalid_search_terms}" raise ValueError(__a ) _snake_case : List[str] = requests.get( F"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}" , headers={"""User-agent""": """A random string"""} , ) if response.status_code == 4_29: raise requests.HTTPError _snake_case : Tuple = response.json() if not wanted_data: return {id_: data["data"]["children"][id_] for id_ in range(__a )} _snake_case : Dict = {} for id_ in range(__a ): _snake_case : Dict = { item: data["data"]["children"][id_]["data"][item] for item in wanted_data } return data_dict if __name__ == "__main__": # If you get Error 429, that means you are rate limited.Try after some time print(get_subreddit_data('''learnpython''', wanted_data=['''title''', '''url''', '''selftext''']))
609
from datetime import datetime import matplotlib.pyplot as plt import torch def lowerCAmelCase_ ( __a ) -> Any: """simple docstring""" for param in module.parameters(): lowerCamelCase__: Tuple =False def lowerCAmelCase_ ( ) -> Optional[int]: """simple docstring""" lowerCamelCase__: List[str] ="cuda" if torch.cuda.is_available() else "cpu" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): lowerCamelCase__: str ="mps" if device == "mps": print( "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch" " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues" " with generations." ) return device def lowerCAmelCase_ ( __a ) -> List[str]: """simple docstring""" lowerCamelCase__: Union[str, Any] =plt.imshow(__a ) fig.axes.get_xaxis().set_visible(__a ) fig.axes.get_yaxis().set_visible(__a ) plt.show() def lowerCAmelCase_ ( ) -> Optional[Any]: """simple docstring""" lowerCamelCase__: List[str] =datetime.now() lowerCamelCase__: str =current_time.strftime("%H:%M:%S" ) return timestamp
59
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available __lowerCamelCase : Union[str, Any] = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Union[str, Any] = ['''MLukeTokenizer'''] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mluke import MLukeTokenizer else: import sys __lowerCamelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
216
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __A = { "configuration_pix2struct": [ "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Pix2StructConfig", "Pix2StructTextConfig", "Pix2StructVisionConfig", ], "processing_pix2struct": ["Pix2StructProcessor"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["Pix2StructImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST", "Pix2StructPreTrainedModel", "Pix2StructForConditionalGeneration", "Pix2StructVisionModel", "Pix2StructTextModel", ] if TYPE_CHECKING: from .configuration_pixastruct import ( PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP, PixaStructConfig, PixaStructTextConfig, PixaStructVisionConfig, ) from .processing_pixastruct import PixaStructProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_pixastruct import PixaStructImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pixastruct import ( PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST, PixaStructForConditionalGeneration, PixaStructPreTrainedModel, PixaStructTextModel, PixaStructVisionModel, ) else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
59
0
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_lxmert import LxmertTokenizer A = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} A = { """vocab_file""": { """unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""", }, """tokenizer_file""": { """unc-nlp/lxmert-base-uncased""": ( """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json""" ), }, } A = { """unc-nlp/lxmert-base-uncased""": 512, } A = { """unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True}, } class a__ ( __SCREAMING_SNAKE_CASE ): lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_INIT_CONFIGURATION lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = LxmertTokenizer def __init__( self : Optional[int] , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Any=True , UpperCamelCase_ : List[str]="[UNK]" , UpperCamelCase_ : Dict="[SEP]" , UpperCamelCase_ : Any="[PAD]" , UpperCamelCase_ : int="[CLS]" , UpperCamelCase_ : Dict="[MASK]" , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Any=None , **UpperCamelCase_ : int , ): """simple docstring""" super().__init__( UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , tokenize_chinese_chars=UpperCAmelCase_ , strip_accents=UpperCAmelCase_ , **UpperCAmelCase_ , ) __UpperCAmelCase : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__()) if ( normalizer_state.get("lowercase" , UpperCAmelCase_) != do_lower_case or normalizer_state.get("strip_accents" , UpperCAmelCase_) != strip_accents or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase_) != tokenize_chinese_chars ): __UpperCAmelCase : Dict = getattr(UpperCAmelCase_ , normalizer_state.pop("type")) __UpperCAmelCase : Tuple = do_lower_case __UpperCAmelCase : Union[str, Any] = strip_accents __UpperCAmelCase : Dict = tokenize_chinese_chars __UpperCAmelCase : Dict = normalizer_class(**UpperCAmelCase_) __UpperCAmelCase : Any = do_lower_case def a_ ( self : Any , UpperCamelCase_ : str , UpperCamelCase_ : Tuple=None): """simple docstring""" __UpperCAmelCase : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def a_ ( self : List[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None): """simple docstring""" __UpperCAmelCase : List[str] = [self.sep_token_id] __UpperCAmelCase : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def a_ ( self : str , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None): """simple docstring""" __UpperCAmelCase : List[Any] = self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_) return tuple(UpperCAmelCase_)
77
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer __A = logging.get_logger(__name__) __A = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} __A = { "vocab_file": { "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt", "distilbert-base-uncased-distilled-squad": ( "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt" ), "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt", "distilbert-base-cased-distilled-squad": ( "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt" ), "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt", "distilbert-base-multilingual-cased": ( "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt" ), }, "tokenizer_file": { "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json", "distilbert-base-uncased-distilled-squad": ( "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json" ), "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json", "distilbert-base-cased-distilled-squad": ( "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json" ), "distilbert-base-german-cased": ( "https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json" ), "distilbert-base-multilingual-cased": ( "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json" ), }, } __A = { "distilbert-base-uncased": 512, "distilbert-base-uncased-distilled-squad": 512, "distilbert-base-cased": 512, "distilbert-base-cased-distilled-squad": 512, "distilbert-base-german-cased": 512, "distilbert-base-multilingual-cased": 512, } __A = { "distilbert-base-uncased": {"do_lower_case": True}, "distilbert-base-uncased-distilled-squad": {"do_lower_case": True}, "distilbert-base-cased": {"do_lower_case": False}, "distilbert-base-cased-distilled-squad": {"do_lower_case": False}, "distilbert-base-german-cased": {"do_lower_case": False}, "distilbert-base-multilingual-cased": {"do_lower_case": False}, } class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = PRETRAINED_INIT_CONFIGURATION lowercase_ = ["input_ids", "attention_mask"] lowercase_ = DistilBertTokenizer def __init__(self : Tuple , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[Any]="[UNK]" , UpperCAmelCase_ : Dict="[SEP]" , UpperCAmelCase_ : Dict="[PAD]" , UpperCAmelCase_ : Optional[int]="[CLS]" , UpperCAmelCase_ : str="[MASK]" , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[str]=None , **UpperCAmelCase_ : List[str] , ) ->str: '''simple docstring''' super().__init__( UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , tokenize_chinese_chars=UpperCAmelCase_ , strip_accents=UpperCAmelCase_ , **UpperCAmelCase_ , ) lowerCamelCase__: Union[str, Any] =json.loads(self.backend_tokenizer.normalizer.__getstate__()) if ( normalizer_state.get("lowercase" , UpperCAmelCase_) != do_lower_case or normalizer_state.get("strip_accents" , UpperCAmelCase_) != strip_accents or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase_) != tokenize_chinese_chars ): lowerCamelCase__: List[str] =getattr(UpperCAmelCase_ , normalizer_state.pop("type")) lowerCamelCase__: Optional[int] =do_lower_case lowerCamelCase__: int =strip_accents lowerCamelCase__: Any =tokenize_chinese_chars lowerCamelCase__: Any =normalizer_class(**UpperCAmelCase_) lowerCamelCase__: str =do_lower_case def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any]=None) ->Dict: '''simple docstring''' lowerCamelCase__: str =[self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None) ->List[int]: '''simple docstring''' lowerCamelCase__: str =[self.sep_token_id] lowerCamelCase__: str =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None) ->Tuple[str]: '''simple docstring''' lowerCamelCase__: str =self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_) return tuple(UpperCAmelCase_)
59
0
"""simple docstring""" import inspect import unittest from transformers import SegformerConfig, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_MAPPING, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerModel, ) from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import SegformerImageProcessor class lowerCAmelCase__ ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def A_ ( self ): _lowerCamelCase : Dict = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(UpperCAmelCase_ , 'hidden_sizes' ) ) self.parent.assertTrue(hasattr(UpperCAmelCase_ , 'num_attention_heads' ) ) self.parent.assertTrue(hasattr(UpperCAmelCase_ , 'num_encoder_blocks' ) ) class lowerCAmelCase__ : '''simple docstring''' def __init__( self , lowercase , lowercase=13 , lowercase=64 , lowercase=3 , lowercase=4 , lowercase=[2, 2, 2, 2] , lowercase=[8, 4, 2, 1] , lowercase=[16, 32, 64, 128] , lowercase=[1, 4, 8, 16] , lowercase=[1, 2, 4, 8] , lowercase=True , lowercase=True , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.02 , lowercase=3 , lowercase=None , ): _lowerCamelCase : Optional[int] = parent _lowerCamelCase : int = batch_size _lowerCamelCase : Tuple = image_size _lowerCamelCase : List[Any] = num_channels _lowerCamelCase : Dict = num_encoder_blocks _lowerCamelCase : Dict = sr_ratios _lowerCamelCase : Union[str, Any] = depths _lowerCamelCase : Tuple = hidden_sizes _lowerCamelCase : Any = downsampling_rates _lowerCamelCase : int = num_attention_heads _lowerCamelCase : Tuple = is_training _lowerCamelCase : Any = use_labels _lowerCamelCase : str = hidden_act _lowerCamelCase : Tuple = hidden_dropout_prob _lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob _lowerCamelCase : int = initializer_range _lowerCamelCase : int = num_labels _lowerCamelCase : List[Any] = scope def A_ ( self ): _lowerCamelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCamelCase : Union[str, Any] = None if self.use_labels: _lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) _lowerCamelCase : List[str] = self.get_config() return config, pixel_values, labels def A_ ( self ): return SegformerConfig( image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def A_ ( self , lowercase , lowercase , lowercase ): _lowerCamelCase : str = SegformerModel(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() _lowerCamelCase : Union[str, Any] = model(UpperCAmelCase_ ) _lowerCamelCase : List[str] = self.image_size // (self.downsampling_rates[-1] * 2) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) ) def A_ ( self , lowercase , lowercase , lowercase ): _lowerCamelCase : Tuple = self.num_labels _lowerCamelCase : Any = SegformerForSemanticSegmentation(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() _lowerCamelCase : Any = model(UpperCAmelCase_ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) _lowerCamelCase : Union[str, Any] = model(UpperCAmelCase_ , labels=UpperCAmelCase_ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) self.parent.assertGreater(result.loss , 0.0 ) def A_ ( self , lowercase , lowercase , lowercase ): _lowerCamelCase : Optional[Any] = 1 _lowerCamelCase : Union[str, Any] = SegformerForSemanticSegmentation(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() _lowerCamelCase : str = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(UpperCAmelCase_ ) _lowerCamelCase : Tuple = model(UpperCAmelCase_ , labels=UpperCAmelCase_ ) self.parent.assertGreater(result.loss , 0.0 ) def A_ ( self ): _lowerCamelCase : Union[str, Any] = self.prepare_config_and_inputs() _lowerCamelCase : Union[str, Any] = config_and_inputs _lowerCamelCase : List[str] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase__ ( __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = ( ( SegformerModel, SegformerForSemanticSegmentation, SegformerForImageClassification, ) if is_torch_available() else () ) lowerCamelCase__ = ( { """feature-extraction""": SegformerModel, """image-classification""": SegformerForImageClassification, """image-segmentation""": SegformerForSemanticSegmentation, } if is_torch_available() else {} ) lowerCamelCase__ = True lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False def A_ ( self ): _lowerCamelCase : Tuple = SegformerModelTester(self ) _lowerCamelCase : str = SegformerConfigTester(self , config_class=UpperCAmelCase_ ) def A_ ( self ): self.config_tester.run_common_tests() def A_ ( self ): _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase_ ) def A_ ( self ): _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_binary_image_segmentation(*UpperCAmelCase_ ) def A_ ( self ): _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*UpperCAmelCase_ ) @unittest.skip('SegFormer does not use inputs_embeds' ) def A_ ( self ): pass @unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods' ) def A_ ( self ): pass def A_ ( self ): _lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : Union[str, Any] = model_class(UpperCAmelCase_ ) _lowerCamelCase : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase : int = [*signature.parameters.keys()] _lowerCamelCase : str = ["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCAmelCase_ ) def A_ ( self ): _lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() _lowerCamelCase : List[Any] = True for model_class in self.all_model_classes: _lowerCamelCase : Optional[int] = True _lowerCamelCase : Dict = False _lowerCamelCase : Any = True _lowerCamelCase : str = model_class(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() with torch.no_grad(): _lowerCamelCase : Optional[int] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) ) _lowerCamelCase : Tuple = outputs.attentions _lowerCamelCase : List[Any] = sum(self.model_tester.depths ) self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ ) # check that output_attentions also work using config del inputs_dict["output_attentions"] _lowerCamelCase : Optional[Any] = True _lowerCamelCase : List[str] = model_class(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() with torch.no_grad(): _lowerCamelCase : Optional[Any] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) ) _lowerCamelCase : List[Any] = outputs.attentions self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ ) # verify the first attentions (first block, first layer) _lowerCamelCase : Dict = (self.model_tester.image_size // 4) ** 2 _lowerCamelCase : int = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) # verify the last attentions (last block, last layer) _lowerCamelCase : Any = (self.model_tester.image_size // 32) ** 2 _lowerCamelCase : str = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2 self.assertListEqual( list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , ) _lowerCamelCase : int = len(UpperCAmelCase_ ) # Check attention is always last and order is fine _lowerCamelCase : Union[str, Any] = True _lowerCamelCase : List[Any] = True _lowerCamelCase : Dict = model_class(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() with torch.no_grad(): _lowerCamelCase : Union[str, Any] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) ) self.assertEqual(out_len + 1 , len(UpperCAmelCase_ ) ) _lowerCamelCase : Any = outputs.attentions self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ ) # verify the first attentions (first block, first layer) _lowerCamelCase : Union[str, Any] = (self.model_tester.image_size // 4) ** 2 _lowerCamelCase : Any = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) def A_ ( self ): def check_hidden_states_output(lowercase , lowercase , lowercase ): _lowerCamelCase : List[str] = model_class(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() with torch.no_grad(): _lowerCamelCase : str = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) ) _lowerCamelCase : Any = outputs.hidden_states _lowerCamelCase : Any = self.model_tester.num_encoder_blocks self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.hidden_sizes[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : Tuple = True check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCamelCase : Any = True check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) def A_ ( self ): if not self.model_tester.is_training: return _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _lowerCamelCase : Tuple = True for model_class in self.all_model_classes: if model_class in get_values(UpperCAmelCase_ ): continue _lowerCamelCase : List[str] = model_class(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.train() _lowerCamelCase : Tuple = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_ ) _lowerCamelCase : Optional[Any] = model(**UpperCAmelCase_ ).loss loss.backward() @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def A_ ( self ): pass @slow def A_ ( self ): for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Union[str, Any] = SegformerModel.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) def _snake_case ( ): _lowerCamelCase : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def A_ ( self ): _lowerCamelCase : Optional[Any] = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=UpperCAmelCase_ , align=UpperCAmelCase_ , do_random_crop=UpperCAmelCase_ ) _lowerCamelCase : Tuple = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to( UpperCAmelCase_ ) _lowerCamelCase : Tuple = prepare_img() _lowerCamelCase : Union[str, Any] = image_processor(images=UpperCAmelCase_ , return_tensors='pt' ) _lowerCamelCase : List[str] = encoded_inputs.pixel_values.to(UpperCAmelCase_ ) with torch.no_grad(): _lowerCamelCase : Dict = model(UpperCAmelCase_ ) _lowerCamelCase : Optional[Any] = torch.Size((1, model.config.num_labels, 128, 128) ) self.assertEqual(outputs.logits.shape , UpperCAmelCase_ ) _lowerCamelCase : Optional[Any] = torch.tensor( [ [[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]], [[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]], [[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]], ] ).to(UpperCAmelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) ) @slow def A_ ( self ): _lowerCamelCase : Optional[Any] = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=UpperCAmelCase_ , align=UpperCAmelCase_ , do_random_crop=UpperCAmelCase_ ) _lowerCamelCase : str = SegformerForSemanticSegmentation.from_pretrained( 'nvidia/segformer-b1-finetuned-cityscapes-1024-1024' ).to(UpperCAmelCase_ ) _lowerCamelCase : Tuple = prepare_img() _lowerCamelCase : Optional[Any] = image_processor(images=UpperCAmelCase_ , return_tensors='pt' ) _lowerCamelCase : Tuple = encoded_inputs.pixel_values.to(UpperCAmelCase_ ) with torch.no_grad(): _lowerCamelCase : List[Any] = model(UpperCAmelCase_ ) _lowerCamelCase : Any = torch.Size((1, model.config.num_labels, 128, 128) ) self.assertEqual(outputs.logits.shape , UpperCAmelCase_ ) _lowerCamelCase : Optional[int] = torch.tensor( [ [[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]], [[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]], [[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]], ] ).to(UpperCAmelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCAmelCase_ , atol=1E-1 ) ) @slow def A_ ( self ): _lowerCamelCase : Union[str, Any] = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=UpperCAmelCase_ , align=UpperCAmelCase_ , do_random_crop=UpperCAmelCase_ ) _lowerCamelCase : str = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to( UpperCAmelCase_ ) _lowerCamelCase : int = prepare_img() _lowerCamelCase : Tuple = image_processor(images=UpperCAmelCase_ , return_tensors='pt' ) _lowerCamelCase : Any = encoded_inputs.pixel_values.to(UpperCAmelCase_ ) with torch.no_grad(): _lowerCamelCase : Union[str, Any] = model(UpperCAmelCase_ ) _lowerCamelCase : Dict = outputs.logits.detach().cpu() _lowerCamelCase : Any = image_processor.post_process_semantic_segmentation(outputs=UpperCAmelCase_ , target_sizes=[(500, 300)] ) _lowerCamelCase : List[str] = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape , UpperCAmelCase_ ) _lowerCamelCase : Tuple = image_processor.post_process_semantic_segmentation(outputs=UpperCAmelCase_ ) _lowerCamelCase : Dict = torch.Size((128, 128) ) self.assertEqual(segmentation[0].shape , UpperCAmelCase_ )
630
import operator as op def lowerCAmelCase_ ( __a ) -> Tuple: """simple docstring""" lowerCamelCase__: Optional[Any] =[] lowerCamelCase__: Tuple =lambda __a , __a : int(x / y ) # noqa: E731 integer division operation lowerCamelCase__: Tuple ={ "^": op.pow, "*": op.mul, "/": div, "+": op.add, "-": op.sub, } # operators & their respective operation # print table header print("Symbol".center(8 ) , "Action".center(12 ) , "Stack" , sep=" | " ) print("-" * (30 + len(__a )) ) for x in post_fix: if x.isdigit(): # if x in digit stack.append(__a ) # append x to stack # output in tabular format print(x.rjust(8 ) , ("push(" + x + ")").ljust(12 ) , ",".join(__a ) , sep=" | " ) else: lowerCamelCase__: List[Any] =stack.pop() # pop stack # output in tabular format print("".rjust(8 ) , ("pop(" + b + ")").ljust(12 ) , ",".join(__a ) , sep=" | " ) lowerCamelCase__: Optional[Any] =stack.pop() # pop stack # output in tabular format print("".rjust(8 ) , ("pop(" + a + ")").ljust(12 ) , ",".join(__a ) , sep=" | " ) stack.append( str(opr[x](int(__a ) , int(__a ) ) ) ) # evaluate the 2 values popped from stack & push result to stack # output in tabular format print( x.rjust(8 ) , ("push(" + a + x + b + ")").ljust(12 ) , ",".join(__a ) , sep=" | " , ) return int(stack[0] ) if __name__ == "__main__": __A = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ") print("\n\tResult = ", solve(Postfix))
59
0
import qiskit def lowerCamelCase ( a_ , a_ ) -> qiskit.result.counts.Counts: lowerCAmelCase_ = qiskit.Aer.get_backend('aer_simulator' ) # Create a Quantum Circuit acting on the q register lowerCAmelCase_ = qiskit.QuantumCircuit(__a , __a ) # Map the quantum measurement to the classical bits circuit.measure([0] , [0] ) # Execute the circuit on the simulator lowerCAmelCase_ = qiskit.execute(__a , __a , shots=1_000 ) # Return the histogram data of the results of the experiment. return job.result().get_counts(__a ) if __name__ == "__main__": print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''')
318
from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING __A = logging.get_logger(__name__) @add_end_docstrings(__SCREAMING_SNAKE_CASE ) class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__(self : List[Any] , **UpperCAmelCase_ : Any) ->Any: '''simple docstring''' super().__init__(**UpperCAmelCase_) requires_backends(self , "vision") requires_backends(self , "torch") if self.framework != "pt": raise ValueError(F"""The {self.__class__} is only available in PyTorch.""") self.check_model_type(UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Tuple , **UpperCAmelCase_ : List[Any]) ->Tuple: '''simple docstring''' lowerCamelCase__: Optional[int] ={} lowerCamelCase__: Tuple ={} lowerCamelCase__: str ={} # preprocess args if "points_per_batch" in kwargs: lowerCamelCase__: Optional[Any] =kwargs["points_per_batch"] if "points_per_crop" in kwargs: lowerCamelCase__: int =kwargs["points_per_crop"] if "crops_n_layers" in kwargs: lowerCamelCase__: Any =kwargs["crops_n_layers"] if "crop_overlap_ratio" in kwargs: lowerCamelCase__: Tuple =kwargs["crop_overlap_ratio"] if "crop_n_points_downscale_factor" in kwargs: lowerCamelCase__: List[Any] =kwargs["crop_n_points_downscale_factor"] # postprocess args if "pred_iou_thresh" in kwargs: lowerCamelCase__: List[str] =kwargs["pred_iou_thresh"] if "stability_score_offset" in kwargs: lowerCamelCase__: int =kwargs["stability_score_offset"] if "mask_threshold" in kwargs: lowerCamelCase__: Optional[int] =kwargs["mask_threshold"] if "stability_score_thresh" in kwargs: lowerCamelCase__: str =kwargs["stability_score_thresh"] if "crops_nms_thresh" in kwargs: lowerCamelCase__: Any =kwargs["crops_nms_thresh"] if "output_rle_mask" in kwargs: lowerCamelCase__: List[Any] =kwargs["output_rle_mask"] if "output_bboxes_mask" in kwargs: lowerCamelCase__: List[str] =kwargs["output_bboxes_mask"] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__(self : int , UpperCAmelCase_ : Dict , *UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Optional[Any]=None , **UpperCAmelCase_ : Dict) ->Optional[Any]: '''simple docstring''' return super().__call__(UpperCAmelCase_ , *UpperCAmelCase_ , num_workers=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , **UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any]=64 , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : float = 512 / 1_500 , UpperCAmelCase_ : Optional[int] = 32 , UpperCAmelCase_ : Optional[int] = 1 , ) ->Dict: '''simple docstring''' lowerCamelCase__: Dict =load_image(UpperCAmelCase_) lowerCamelCase__: List[str] =self.image_processor.size["longest_edge"] lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Union[str, Any] =self.image_processor.generate_crop_boxes( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: str =self.image_processor(images=UpperCAmelCase_ , return_tensors="pt") with self.device_placement(): if self.framework == "pt": lowerCamelCase__: str =self.get_inference_context() with inference_context(): lowerCamelCase__: Union[str, Any] =self._ensure_tensor_on_device(UpperCAmelCase_ , device=self.device) lowerCamelCase__: Optional[Any] =self.model.get_image_embeddings(model_inputs.pop("pixel_values")) lowerCamelCase__: str =image_embeddings lowerCamelCase__: int =grid_points.shape[1] lowerCamelCase__: int =points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. " "To return all points at once, set points_per_batch to None") for i in range(0 , UpperCAmelCase_ , UpperCAmelCase_): lowerCamelCase__: int =grid_points[:, i : i + points_per_batch, :, :] lowerCamelCase__: Optional[Any] =input_labels[:, i : i + points_per_batch] lowerCamelCase__: Dict =i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict=0.88 , UpperCAmelCase_ : Optional[Any]=0.95 , UpperCAmelCase_ : Tuple=0 , UpperCAmelCase_ : Any=1 , ) ->Optional[Any]: '''simple docstring''' lowerCamelCase__: Any =model_inputs.pop("input_boxes") lowerCamelCase__: Dict =model_inputs.pop("is_last") lowerCamelCase__: int =model_inputs.pop("original_sizes").tolist() lowerCamelCase__: Union[str, Any] =model_inputs.pop("reshaped_input_sizes").tolist() lowerCamelCase__: Union[str, Any] =self.model(**UpperCAmelCase_) # post processing happens here in order to avoid CPU GPU copies of ALL the masks lowerCamelCase__: Optional[int] =model_outputs["pred_masks"] lowerCamelCase__: Union[str, Any] =self.image_processor.post_process_masks( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , binarize=UpperCAmelCase_) lowerCamelCase__: Optional[Any] =model_outputs["iou_scores"] lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Any =self.image_processor.filter_masks( masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Optional[int]=0.7 , ) ->Tuple: '''simple docstring''' lowerCamelCase__: Any =[] lowerCamelCase__: Optional[int] =[] lowerCamelCase__: List[str] =[] for model_output in model_outputs: all_scores.append(model_output.pop("iou_scores")) all_masks.extend(model_output.pop("masks")) all_boxes.append(model_output.pop("boxes")) lowerCamelCase__: str =torch.cat(UpperCAmelCase_) lowerCamelCase__: List[str] =torch.cat(UpperCAmelCase_) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Dict =self.image_processor.post_process_for_mask_generation( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: List[str] =defaultdict(UpperCAmelCase_) for output in model_outputs: for k, v in output.items(): extra[k].append(UpperCAmelCase_) lowerCamelCase__: Any ={} if output_rle_mask: lowerCamelCase__: Union[str, Any] =rle_mask if output_bboxes_mask: lowerCamelCase__: int =bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
59
0
"""simple docstring""" import math from typing import Any, Callable, List, Optional, Tuple, Union import numpy as np import torch from ...models import TaFilmDecoder from ...schedulers import DDPMScheduler from ...utils import is_onnx_available, logging, randn_tensor if is_onnx_available(): from ..onnx_utils import OnnxRuntimeModel from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder __A : str = logging.get_logger(__name__) # pylint: disable=invalid-name __A : int = 256 class lowerCamelCase ( __SCREAMING_SNAKE_CASE ): lowercase : Any = ['melgan'] def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ): super().__init__() # From MELGAN UpperCamelCase : List[Any] = math.log(1e-5 ) # Matches MelGAN training. UpperCamelCase : str = 4.0 # Largest value for most examples UpperCamelCase : Dict = 128 self.register_modules( notes_encoder=UpperCAmelCase_ , continuous_encoder=UpperCAmelCase_ , decoder=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , melgan=UpperCAmelCase_ , ) def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=(-1.0, 1.0) , SCREAMING_SNAKE_CASE_=False ): UpperCamelCase : str = output_range if clip: UpperCamelCase : Any = torch.clip(UpperCAmelCase_ , self.min_value , self.max_value ) # Scale to [0, 1]. UpperCamelCase : Tuple = (features - self.min_value) / (self.max_value - self.min_value) # Scale to [min_out, max_out]. return zero_one * (max_out - min_out) + min_out def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=(-1.0, 1.0) , SCREAMING_SNAKE_CASE_=False ): UpperCamelCase : List[Any] = input_range UpperCamelCase : List[str] = torch.clip(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) if clip else outputs # Scale to [0, 1]. UpperCamelCase : List[str] = (outputs - min_out) / (max_out - min_out) # Scale to [self.min_value, self.max_value]. return zero_one * (self.max_value - self.min_value) + self.min_value def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase : Dict = input_tokens > 0 UpperCamelCase : int = self.notes_encoder( encoder_input_tokens=UpperCAmelCase_ , encoder_inputs_mask=UpperCAmelCase_ ) UpperCamelCase : Dict = self.continuous_encoder( encoder_inputs=UpperCAmelCase_ , encoder_inputs_mask=UpperCAmelCase_ ) return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase : Tuple = noise_time if not torch.is_tensor(UpperCAmelCase_ ): UpperCamelCase : Tuple = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device ) elif torch.is_tensor(UpperCAmelCase_ ) and len(timesteps.shape ) == 0: UpperCamelCase : Union[str, Any] = timesteps[None].to(input_tokens.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML UpperCamelCase : List[Any] = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device ) UpperCamelCase : Tuple = self.decoder( encodings_and_masks=UpperCAmelCase_ , decoder_input_tokens=UpperCAmelCase_ , decoder_noise_time=UpperCAmelCase_ ) return logits @torch.no_grad() def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 100 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = "numpy" , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , ): if (callback_steps is None) or ( callback_steps is not None and (not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or callback_steps <= 0) ): raise ValueError( f'`callback_steps` has to be a positive integer but is {callback_steps} of type' f' {type(UpperCAmelCase_ )}.' ) UpperCamelCase : List[str] = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa ) UpperCamelCase : Union[str, Any] = np.zeros([1, 0, self.n_dims] , np.floataa ) UpperCamelCase : Optional[int] = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=UpperCAmelCase_ , device=self.device ) for i, encoder_input_tokens in enumerate(UpperCAmelCase_ ): if i == 0: UpperCamelCase : Optional[Any] = torch.from_numpy(pred_mel[:1].copy() ).to( device=self.device , dtype=self.decoder.dtype ) # The first chunk has no previous context. UpperCamelCase : int = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=UpperCAmelCase_ , device=self.device ) else: # The full song pipeline does not feed in a context feature, so the mask # will be all 0s after the feature converter. Because we know we're # feeding in a full context chunk from the previous prediction, set it # to all 1s. UpperCamelCase : Tuple = ones UpperCamelCase : Optional[Any] = self.scale_features( UpperCAmelCase_ , output_range=[-1.0, 1.0] , clip=UpperCAmelCase_ ) UpperCamelCase : Tuple = self.encode( input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=UpperCAmelCase_ , continuous_mask=UpperCAmelCase_ , ) # Sample encoder_continuous_inputs shaped gaussian noise to begin loop UpperCamelCase : List[Any] = randn_tensor( shape=encoder_continuous_inputs.shape , generator=UpperCAmelCase_ , device=self.device , dtype=self.decoder.dtype , ) # set step values self.scheduler.set_timesteps(UpperCAmelCase_ ) # Denoising diffusion loop for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): UpperCamelCase : Optional[int] = self.decode( encodings_and_masks=UpperCAmelCase_ , input_tokens=UpperCAmelCase_ , noise_time=t / self.scheduler.config.num_train_timesteps , ) # Compute previous output: x_t -> x_t-1 UpperCamelCase : str = self.scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_ ).prev_sample UpperCamelCase : Optional[Any] = self.scale_to_features(UpperCAmelCase_ , input_range=[-1.0, 1.0] ) UpperCamelCase : List[Any] = mel[:1] UpperCamelCase : Optional[int] = mel.cpu().float().numpy() UpperCamelCase : int = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 ) # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(UpperCAmelCase_ , UpperCAmelCase_ ) logger.info("""Generated segment""" , UpperCAmelCase_ ) if output_type == "numpy" and not is_onnx_available(): raise ValueError( """Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.""" ) elif output_type == "numpy" and self.melgan is None: raise ValueError( """Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.""" ) if output_type == "numpy": UpperCamelCase : Any = self.melgan(input_features=full_pred_mel.astype(np.floataa ) ) else: UpperCamelCase : Union[str, Any] = full_pred_mel if not return_dict: return (output,) return AudioPipelineOutput(audios=UpperCAmelCase_ )
499
from transformers import BertTokenizerFast from .custom_tokenization import CustomTokenizer class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = CustomTokenizer pass
59
0
"""simple docstring""" from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any ): lowerCAmelCase = analyze_text(__a ) lowerCAmelCase = list(' ' + ascii_lowercase ) # what is our total sum of probabilities. lowerCAmelCase = sum(single_char_strings.values() ) # one length string lowerCAmelCase = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: lowerCAmelCase = single_char_strings[ch] lowerCAmelCase = my_str / all_sum my_fir_sum += prob * math.loga(__a ) # entropy formula. # print entropy print(F'{round(-1 * my_fir_sum ):.1f}' ) # two len string lowerCAmelCase = sum(two_char_strings.values() ) lowerCAmelCase = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for cha in my_alphas: lowerCAmelCase = cha + cha if sequence in two_char_strings: lowerCAmelCase = two_char_strings[sequence] lowerCAmelCase = int(__a ) / all_sum my_sec_sum += prob * math.loga(__a ) # print second entropy print(F'{round(-1 * my_sec_sum ):.1f}' ) # print the difference between them print(F'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}' ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ): lowerCAmelCase = Counter() # type: ignore lowerCAmelCase = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. two_char_strings[" " + text[0]] += 1 for i in range(0 , len(__a ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def _SCREAMING_SNAKE_CASE (): import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
4
import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE_ (self : Dict) ->Optional[int]: '''simple docstring''' lowerCamelCase__: List[Any] =inspect.getfile(accelerate.test_utils) lowerCamelCase__: List[Any] =os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"]) lowerCamelCase__: Any =os.path.sep.join( mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]) lowerCamelCase__: Tuple =os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"]) @require_multi_gpu def SCREAMING_SNAKE_CASE_ (self : str) ->str: '''simple docstring''' print(F"""Found {torch.cuda.device_count()} devices.""") lowerCamelCase__: Union[str, Any] =["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path] with patch_environment(omp_num_threads=1): execute_subprocess_async(UpperCAmelCase_ , env=os.environ.copy()) @require_multi_gpu def SCREAMING_SNAKE_CASE_ (self : List[str]) ->List[Any]: '''simple docstring''' print(F"""Found {torch.cuda.device_count()} devices.""") lowerCamelCase__: Dict =["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path] print(F"""Command: {cmd}""") with patch_environment(omp_num_threads=1): execute_subprocess_async(UpperCAmelCase_ , env=os.environ.copy()) @require_multi_gpu def SCREAMING_SNAKE_CASE_ (self : Dict) ->Tuple: '''simple docstring''' lowerCamelCase__: int =["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__)] with patch_environment(omp_num_threads=1): execute_subprocess_async(UpperCAmelCase_ , env=os.environ.copy()) @require_multi_gpu def SCREAMING_SNAKE_CASE_ (self : str) ->List[Any]: '''simple docstring''' print(F"""Found {torch.cuda.device_count()} devices, using 2 devices only""") lowerCamelCase__: int =["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1"): execute_subprocess_async(UpperCAmelCase_ , env=os.environ.copy()) if __name__ == "__main__": __A = Accelerator() __A = (accelerator.state.process_index + 2, 10) __A = torch.randint(0, 10, shape).to(accelerator.device) __A = "" __A = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." __A = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." __A = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
59
0
import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): lowerCAmelCase__ = 'Speech2TextFeatureExtractor' lowerCAmelCase__ = 'Speech2TextTokenizer' def __init__( self , lowercase , lowercase ) -> Any: super().__init__(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCamelCase_ = self.feature_extractor lowerCamelCase_ = False def __call__( self , *lowercase , **lowercase ) -> Optional[Any]: if self._in_target_context_manager: return self.current_processor(*UpperCAmelCase_ , **UpperCAmelCase_ ) if "raw_speech" in kwargs: warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." ) lowerCamelCase_ = kwargs.pop("raw_speech" ) else: lowerCamelCase_ = kwargs.pop("audio" , UpperCAmelCase_ ) lowerCamelCase_ = kwargs.pop("sampling_rate" , UpperCAmelCase_ ) lowerCamelCase_ = kwargs.pop("text" , UpperCAmelCase_ ) if len(UpperCAmelCase_ ) > 0: lowerCamelCase_ = args[0] lowerCamelCase_ = args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process." ) if audio is not None: lowerCamelCase_ = self.feature_extractor(UpperCAmelCase_ , *UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , **UpperCAmelCase_ ) if text is not None: lowerCamelCase_ = self.tokenizer(UpperCAmelCase_ , **UpperCAmelCase_ ) if text is None: return inputs elif audio is None: return encodings else: lowerCamelCase_ = encodings["input_ids"] return inputs def SCREAMING_SNAKE_CASE_( self , *lowercase , **lowercase ) -> List[str]: return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ ) def SCREAMING_SNAKE_CASE_( self , *lowercase , **lowercase ) -> Tuple: return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ ) @contextmanager def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]: warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your audio inputs, or in a separate call." ) lowerCamelCase_ = True lowerCamelCase_ = self.tokenizer yield lowerCamelCase_ = self.feature_extractor lowerCamelCase_ = False
463
from typing import List, Optional, Tuple, Union import PIL import torch from torchvision import transforms from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput from diffusers.schedulers import DDIMScheduler from diffusers.utils import randn_tensor __A = transforms.Compose( [ transforms.Resize((256, 256)), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def lowerCAmelCase_ ( __a ) -> str: """simple docstring""" if isinstance(__a , torch.Tensor ): return image elif isinstance(__a , PIL.Image.Image ): lowerCamelCase__: Any =[image] lowerCamelCase__: Optional[Any] =[trans(img.convert("RGB" ) ) for img in image] lowerCamelCase__: Dict =torch.stack(__a ) return image class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__(self : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple) ->int: '''simple docstring''' super().__init__() # make sure scheduler can always be converted to DDIM lowerCamelCase__: Tuple =DDIMScheduler.from_config(scheduler.config) self.register_modules(unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Union[str, Any]) ->Dict: '''simple docstring''' if strength < 0 or strength > 1: raise ValueError(F"""The value of strength should in [0.0, 1.0] but is {strength}""") def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple) ->Tuple: '''simple docstring''' lowerCamelCase__: int =min(int(num_inference_steps * strength) , UpperCAmelCase_) lowerCamelCase__: str =max(num_inference_steps - init_timestep , 0) lowerCamelCase__: int =self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int]=None) ->Optional[int]: '''simple docstring''' if not isinstance(UpperCAmelCase_ , (torch.Tensor, PIL.Image.Image, list)): raise ValueError( F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(UpperCAmelCase_)}""") lowerCamelCase__: Optional[int] =image.to(device=UpperCAmelCase_ , dtype=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) and len(UpperCAmelCase_) != batch_size: raise ValueError( F"""You have passed a list of generators of length {len(UpperCAmelCase_)}, but requested an effective batch""" F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""") lowerCamelCase__: Dict =init_latents.shape lowerCamelCase__: int =randn_tensor(UpperCAmelCase_ , generator=UpperCAmelCase_ , device=UpperCAmelCase_ , dtype=UpperCAmelCase_) # get latents print("add noise to latents at timestep" , UpperCAmelCase_) lowerCamelCase__: Union[str, Any] =self.scheduler.add_noise(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: int =init_latents return latents @torch.no_grad() def __call__(self : Tuple , UpperCAmelCase_ : Union[torch.FloatTensor, PIL.Image.Image] = None , UpperCAmelCase_ : float = 0.8 , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : int = 50 , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , ) ->Union[ImagePipelineOutput, Tuple]: '''simple docstring''' self.check_inputs(UpperCAmelCase_) # 2. Preprocess image lowerCamelCase__: Dict =preprocess(UpperCAmelCase_) # 3. set timesteps self.scheduler.set_timesteps(UpperCAmelCase_ , device=self.device) lowerCamelCase__ , lowerCamelCase__: str =self.get_timesteps(UpperCAmelCase_ , UpperCAmelCase_ , self.device) lowerCamelCase__: Optional[int] =timesteps[:1].repeat(UpperCAmelCase_) # 4. Prepare latent variables lowerCamelCase__: int =self.prepare_latents(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , self.unet.dtype , self.device , UpperCAmelCase_) lowerCamelCase__: Tuple =latents # 5. Denoising loop for t in self.progress_bar(UpperCAmelCase_): # 1. predict noise model_output lowerCamelCase__: Dict =self.unet(UpperCAmelCase_ , UpperCAmelCase_).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 lowerCamelCase__: Optional[int] =self.scheduler.step( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , eta=UpperCAmelCase_ , use_clipped_model_output=UpperCAmelCase_ , generator=UpperCAmelCase_ , ).prev_sample lowerCamelCase__: str =(image / 2 + 0.5).clamp(0 , 1) lowerCamelCase__: Optional[Any] =image.cpu().permute(0 , 2 , 3 , 1).numpy() if output_type == "pil": lowerCamelCase__: Dict =self.numpy_to_pil(UpperCAmelCase_) if not return_dict: return (image, latent_timestep.item()) return ImagePipelineOutput(images=UpperCAmelCase_)
59
0
"""simple docstring""" import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 lowerCAmelCase: int =sys.version_info >= (3, 10) def __snake_case ( __A=None ,__A=None ) -> Tuple: return field(default_factory=lambda: default ,metadata=__a ) @dataclass class lowerCamelCase__ : __UpperCAmelCase = 42 __UpperCAmelCase = 42 __UpperCAmelCase = 42 __UpperCAmelCase = 42 @dataclass class lowerCamelCase__ : __UpperCAmelCase = 42 __UpperCAmelCase = field(default="""toto""" , metadata={"""help""": """help message"""} ) @dataclass class lowerCamelCase__ : __UpperCAmelCase = False __UpperCAmelCase = True __UpperCAmelCase = None class lowerCamelCase__ ( __SCREAMING_SNAKE_CASE ): __UpperCAmelCase = """titi""" __UpperCAmelCase = """toto""" class lowerCamelCase__ ( __SCREAMING_SNAKE_CASE ): __UpperCAmelCase = """titi""" __UpperCAmelCase = """toto""" __UpperCAmelCase = 42 @dataclass class lowerCamelCase__ : __UpperCAmelCase = """toto""" def _UpperCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" lowercase : List[str] = BasicEnum(self.foo ) @dataclass class lowerCamelCase__ : __UpperCAmelCase = """toto""" def _UpperCAmelCase ( self ) -> Tuple: """simple docstring""" lowercase : Tuple = MixedTypeEnum(self.foo ) @dataclass class lowerCamelCase__ : __UpperCAmelCase = None __UpperCAmelCase = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """help message"""} ) __UpperCAmelCase = None __UpperCAmelCase = list_field(default=[] ) __UpperCAmelCase = list_field(default=[] ) @dataclass class lowerCamelCase__ : __UpperCAmelCase = list_field(default=[] ) __UpperCAmelCase = list_field(default=[1, 2, 3] ) __UpperCAmelCase = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] ) __UpperCAmelCase = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class lowerCamelCase__ : __UpperCAmelCase = field() __UpperCAmelCase = field() __UpperCAmelCase = field() def _UpperCAmelCase ( self ) -> Tuple: """simple docstring""" lowercase : str = BasicEnum(self.required_enum ) @dataclass class lowerCamelCase__ : __UpperCAmelCase = 42 __UpperCAmelCase = field() __UpperCAmelCase = None __UpperCAmelCase = field(default="""toto""" , metadata={"""help""": """help message"""} ) __UpperCAmelCase = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] ) if is_python_no_less_than_3_10: @dataclass class lowerCamelCase__ : __UpperCAmelCase = False __UpperCAmelCase = True __UpperCAmelCase = None @dataclass class lowerCamelCase__ : __UpperCAmelCase = None __UpperCAmelCase = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """help message"""} ) __UpperCAmelCase = None __UpperCAmelCase = list_field(default=[] ) __UpperCAmelCase = list_field(default=[] ) class lowerCamelCase__ ( unittest.TestCase ): def _UpperCAmelCase ( self , snake_case , snake_case ) -> Union[str, Any]: """simple docstring""" self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): lowercase : Any = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k != "container"} lowercase : Optional[Any] = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k != "container"} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get("""choices""" , UpperCAmelCase_ ) and yy.get("""choices""" , UpperCAmelCase_ ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx["""type"""](UpperCAmelCase_ ) , yy["""type"""](UpperCAmelCase_ ) ) del xx["type"], yy["type"] self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def _UpperCAmelCase ( self ) -> Optional[int]: """simple docstring""" lowercase : Optional[int] = HfArgumentParser(UpperCAmelCase_ ) lowercase : Dict = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--bar""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--baz""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--flag""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) lowercase : Optional[Any] = ["--foo", "1", "--baz", "quux", "--bar", "0.5"] (lowercase ) : str = parser.parse_args_into_dataclasses(UpperCAmelCase_ , look_for_args_file=UpperCAmelCase_ ) self.assertFalse(example.flag ) def _UpperCAmelCase ( self ) -> Dict: """simple docstring""" lowercase : Union[str, Any] = HfArgumentParser(UpperCAmelCase_ ) lowercase : Tuple = argparse.ArgumentParser() expected.add_argument("""--foo""" , default=4_2 , type=UpperCAmelCase_ ) expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def _UpperCAmelCase ( self ) -> Dict: """simple docstring""" lowercase : List[str] = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) expected.add_argument("""--baz""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument("""--no_baz""" , action="""store_false""" , default=UpperCAmelCase_ , dest="""baz""" ) expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ ) lowercase : Tuple = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(UpperCAmelCase_ ) for dataclass_type in dataclass_types: lowercase : int = HfArgumentParser(UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) lowercase : Any = parser.parse_args([] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) lowercase : Optional[Any] = parser.parse_args(["""--foo""", """--no_baz"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) lowercase : Any = parser.parse_args(["""--foo""", """--baz"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) lowercase : Any = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) lowercase : str = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) def _UpperCAmelCase ( self ) -> Any: """simple docstring""" lowercase : str = HfArgumentParser(UpperCAmelCase_ ) lowercase : str = argparse.ArgumentParser() expected.add_argument( """--foo""" , default="""toto""" , choices=["""titi""", """toto""", 4_2] , type=make_choice_type_function(["""titi""", """toto""", 4_2] ) , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) lowercase : str = parser.parse_args([] ) self.assertEqual(args.foo , """toto""" ) lowercase : Union[str, Any] = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) lowercase : int = parser.parse_args(["""--foo""", """titi"""] ) self.assertEqual(args.foo , """titi""" ) lowercase : List[str] = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) lowercase : Optional[Any] = parser.parse_args(["""--foo""", """42"""] ) self.assertEqual(args.foo , 4_2 ) lowercase : Optional[Any] = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def _UpperCAmelCase ( self ) -> str: """simple docstring""" @dataclass class lowerCamelCase__ : __UpperCAmelCase = """toto""" lowercase : Tuple = HfArgumentParser(UpperCAmelCase_ ) lowercase : Dict = argparse.ArgumentParser() expected.add_argument( """--foo""" , default="""toto""" , choices=("""titi""", """toto""", 4_2) , type=make_choice_type_function(["""titi""", """toto""", 4_2] ) , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) lowercase : int = parser.parse_args([] ) self.assertEqual(args.foo , """toto""" ) lowercase : str = parser.parse_args(["""--foo""", """titi"""] ) self.assertEqual(args.foo , """titi""" ) lowercase : int = parser.parse_args(["""--foo""", """42"""] ) self.assertEqual(args.foo , 4_2 ) def _UpperCAmelCase ( self ) -> Optional[Any]: """simple docstring""" lowercase : str = HfArgumentParser(UpperCAmelCase_ ) lowercase : List[Any] = argparse.ArgumentParser() expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=UpperCAmelCase_ ) expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ ) expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) lowercase : Tuple = parser.parse_args([] ) self.assertEqual( UpperCAmelCase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , ) lowercase : Optional[Any] = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() ) self.assertEqual(UpperCAmelCase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) ) def _UpperCAmelCase ( self ) -> Dict: """simple docstring""" lowercase : List[str] = argparse.ArgumentParser() expected.add_argument("""--foo""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ ) expected.add_argument("""--bar""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , help="""help message""" ) expected.add_argument("""--baz""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ ) expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) lowercase : str = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(UpperCAmelCase_ ) for dataclass_type in dataclass_types: lowercase : Optional[int] = HfArgumentParser(UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) lowercase : List[str] = parser.parse_args([] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , bar=UpperCAmelCase_ , baz=UpperCAmelCase_ , ces=[] , des=[] ) ) lowercase : Union[str, Any] = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=1_2 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) ) def _UpperCAmelCase ( self ) -> Dict: """simple docstring""" lowercase : Dict = HfArgumentParser(UpperCAmelCase_ ) lowercase : Union[str, Any] = argparse.ArgumentParser() expected.add_argument("""--required_list""" , nargs="""+""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--required_str""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument( """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def _UpperCAmelCase ( self ) -> str: """simple docstring""" lowercase : Union[str, Any] = HfArgumentParser(UpperCAmelCase_ ) lowercase : int = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument( """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , ) expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ ) expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" ) expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def _UpperCAmelCase ( self ) -> Optional[int]: """simple docstring""" lowercase : List[str] = HfArgumentParser(UpperCAmelCase_ ) lowercase : Tuple = { "foo": 1_2, "bar": 3.14, "baz": "42", "flag": True, } lowercase : List[Any] = parser.parse_dict(UpperCAmelCase_ )[0] lowercase : Dict = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def _UpperCAmelCase ( self ) -> int: """simple docstring""" lowercase : List[Any] = HfArgumentParser(UpperCAmelCase_ ) lowercase : List[Any] = { "foo": 1_2, "bar": 3.14, "baz": "42", "flag": True, "extra": 4_2, } self.assertRaises(UpperCAmelCase_ , parser.parse_dict , UpperCAmelCase_ , allow_extra_keys=UpperCAmelCase_ ) def _UpperCAmelCase ( self ) -> Any: """simple docstring""" lowercase : List[str] = HfArgumentParser(UpperCAmelCase_ ) lowercase : List[Any] = { "foo": 1_2, "bar": 3.14, "baz": "42", "flag": True, } with tempfile.TemporaryDirectory() as tmp_dir: lowercase : List[Any] = os.path.join(UpperCAmelCase_ , """temp_json""" ) os.mkdir(UpperCAmelCase_ ) with open(temp_local_path + """.json""" , """w+""" ) as f: json.dump(UpperCAmelCase_ , UpperCAmelCase_ ) lowercase : Optional[int] = parser.parse_yaml_file(Path(temp_local_path + """.json""" ) )[0] lowercase : Union[str, Any] = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def _UpperCAmelCase ( self ) -> List[Any]: """simple docstring""" lowercase : int = HfArgumentParser(UpperCAmelCase_ ) lowercase : List[str] = { "foo": 1_2, "bar": 3.14, "baz": "42", "flag": True, } with tempfile.TemporaryDirectory() as tmp_dir: lowercase : Optional[int] = os.path.join(UpperCAmelCase_ , """temp_yaml""" ) os.mkdir(UpperCAmelCase_ ) with open(temp_local_path + """.yaml""" , """w+""" ) as f: yaml.dump(UpperCAmelCase_ , UpperCAmelCase_ ) lowercase : List[str] = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0] lowercase : List[Any] = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def _UpperCAmelCase ( self ) -> str: """simple docstring""" lowercase : Optional[Any] = HfArgumentParser(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ )
607
import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 __A = data_utils.TransfoXLTokenizer __A = data_utils.TransfoXLCorpus __A = data_utils __A = data_utils def lowerCAmelCase_ ( __a , __a , __a , __a ) -> List[str]: """simple docstring""" if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(__a , "rb" ) as fp: lowerCamelCase__: Optional[Any] =pickle.load(__a , encoding="latin1" ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) lowerCamelCase__: Union[str, Any] =pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"] print(F"""Save vocabulary to {pytorch_vocab_dump_path}""" ) lowerCamelCase__: Any =corpus.vocab.__dict__ torch.save(__a , __a ) lowerCamelCase__: Dict =corpus.__dict__ corpus_dict_no_vocab.pop("vocab" , __a ) lowerCamelCase__: List[str] =pytorch_dump_folder_path + "/" + CORPUS_NAME print(F"""Save dataset to {pytorch_dataset_dump_path}""" ) torch.save(__a , __a ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model lowerCamelCase__: Optional[Any] =os.path.abspath(__a ) lowerCamelCase__: Dict =os.path.abspath(__a ) print(F"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" ) # Initialise PyTorch model if transfo_xl_config_file == "": lowerCamelCase__: int =TransfoXLConfig() else: lowerCamelCase__: Any =TransfoXLConfig.from_json_file(__a ) print(F"""Building PyTorch model from configuration: {config}""" ) lowerCamelCase__: List[Any] =TransfoXLLMHeadModel(__a ) lowerCamelCase__: List[str] =load_tf_weights_in_transfo_xl(__a , __a , __a ) # Save pytorch-model lowerCamelCase__: List[str] =os.path.join(__a , __a ) lowerCamelCase__: Tuple =os.path.join(__a , __a ) print(F"""Save PyTorch model to {os.path.abspath(__a )}""" ) torch.save(model.state_dict() , __a ) print(F"""Save configuration file to {os.path.abspath(__a )}""" ) with open(__a , "w" , encoding="utf-8" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the folder to store the PyTorch model or dataset/vocab.", ) parser.add_argument( "--tf_checkpoint_path", default="", type=str, help="An optional path to a TensorFlow checkpoint path to be converted.", ) parser.add_argument( "--transfo_xl_config_file", default="", type=str, help=( "An optional config json file corresponding to the pre-trained BERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--transfo_xl_dataset_file", default="", type=str, help="An optional dataset file to be converted in a vocabulary.", ) __A = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
59
0
from __future__ import annotations class snake_case : '''simple docstring''' def __init__( self : Any , lowerCAmelCase_ : int ) -> None: """simple docstring""" SCREAMING_SNAKE_CASE_ = order # a_{0} ... a_{k} SCREAMING_SNAKE_CASE_ = [1.0] + [0.0] * order # b_{0} ... b_{k} SCREAMING_SNAKE_CASE_ = [1.0] + [0.0] * order # x[n-1] ... x[n-k] SCREAMING_SNAKE_CASE_ = [0.0] * self.order # y[n-1] ... y[n-k] SCREAMING_SNAKE_CASE_ = [0.0] * self.order def _lowercase ( self : Optional[int] , lowerCAmelCase_ : list[float] , lowerCAmelCase_ : list[float] ) -> None: """simple docstring""" if len(UpperCAmelCase_ ) < self.order: SCREAMING_SNAKE_CASE_ = [1.0, *a_coeffs] if len(UpperCAmelCase_ ) != self.order + 1: SCREAMING_SNAKE_CASE_ = ( F'''Expected a_coeffs to have {self.order + 1} elements ''' F'''for {self.order}-order filter, got {len(UpperCAmelCase_ )}''' ) raise ValueError(UpperCAmelCase_ ) if len(UpperCAmelCase_ ) != self.order + 1: SCREAMING_SNAKE_CASE_ = ( F'''Expected b_coeffs to have {self.order + 1} elements ''' F'''for {self.order}-order filter, got {len(UpperCAmelCase_ )}''' ) raise ValueError(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE_ = a_coeffs SCREAMING_SNAKE_CASE_ = b_coeffs def _lowercase ( self : str , lowerCAmelCase_ : float ) -> float: """simple docstring""" SCREAMING_SNAKE_CASE_ = 0.0 # Start at index 1 and do index 0 at the end. for i in range(1 , self.order + 1 ): result += ( self.b_coeffs[i] * self.input_history[i - 1] - self.a_coeffs[i] * self.output_history[i - 1] ) SCREAMING_SNAKE_CASE_ = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0] SCREAMING_SNAKE_CASE_ = self.input_history[:-1] SCREAMING_SNAKE_CASE_ = self.output_history[:-1] SCREAMING_SNAKE_CASE_ = sample SCREAMING_SNAKE_CASE_ = result return result
393
from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax __A = logging.get_logger(__name__) @add_end_docstrings(__SCREAMING_SNAKE_CASE ) class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__(self : Optional[int] , **UpperCAmelCase_ : List[Any]) ->List[str]: '''simple docstring''' super().__init__(**UpperCAmelCase_) requires_backends(self , "vision") self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == "tf" else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING) def __call__(self : List[str] , UpperCAmelCase_ : Union[str, List[str], "Image", List["Image"]] , **UpperCAmelCase_ : List[Any]) ->Tuple: '''simple docstring''' return super().__call__(UpperCAmelCase_ , **UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[Any] , **UpperCAmelCase_ : Optional[int]) ->Any: '''simple docstring''' lowerCamelCase__: Optional[int] ={} if "candidate_labels" in kwargs: lowerCamelCase__: Tuple =kwargs["candidate_labels"] if "hypothesis_template" in kwargs: lowerCamelCase__: Tuple =kwargs["hypothesis_template"] return preprocess_params, {}, {} def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Optional[Any]="This is a photo of {}.") ->str: '''simple docstring''' lowerCamelCase__: int =load_image(UpperCAmelCase_) lowerCamelCase__: Any =self.image_processor(images=[image] , return_tensors=self.framework) lowerCamelCase__: Any =candidate_labels lowerCamelCase__: List[str] =[hypothesis_template.format(UpperCAmelCase_) for x in candidate_labels] lowerCamelCase__: int =self.tokenizer(UpperCAmelCase_ , return_tensors=self.framework , padding=UpperCAmelCase_) lowerCamelCase__: str =[text_inputs] return inputs def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Any) ->Tuple: '''simple docstring''' lowerCamelCase__: int =model_inputs.pop("candidate_labels") lowerCamelCase__: List[str] =model_inputs.pop("text_inputs") if isinstance(text_inputs[0] , UpperCAmelCase_): lowerCamelCase__: List[Any] =text_inputs[0] else: # Batching case. lowerCamelCase__: List[Any] =text_inputs[0][0] lowerCamelCase__: List[str] =self.model(**UpperCAmelCase_ , **UpperCAmelCase_) lowerCamelCase__: str ={ "candidate_labels": candidate_labels, "logits": outputs.logits_per_image, } return model_outputs def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : Union[str, Any]) ->int: '''simple docstring''' lowerCamelCase__: List[Any] =model_outputs.pop("candidate_labels") lowerCamelCase__: Optional[int] =model_outputs["logits"][0] if self.framework == "pt": lowerCamelCase__: Optional[Any] =logits.softmax(dim=-1).squeeze(-1) lowerCamelCase__: Optional[Any] =probs.tolist() if not isinstance(UpperCAmelCase_ , UpperCAmelCase_): lowerCamelCase__: Optional[int] =[scores] elif self.framework == "tf": lowerCamelCase__: List[str] =stable_softmax(UpperCAmelCase_ , axis=-1) lowerCamelCase__: Optional[int] =probs.numpy().tolist() else: raise ValueError(F"""Unsupported framework: {self.framework}""") lowerCamelCase__: Optional[int] =[ {"score": score, "label": candidate_label} for score, candidate_label in sorted(zip(UpperCAmelCase_ , UpperCAmelCase_) , key=lambda UpperCAmelCase_: -x[0]) ] return result
59
0
'''simple docstring''' import argparse import torch from transformers import BertForMaskedLM if __name__ == "__main__": SCREAMING_SNAKE_CASE = argparse.ArgumentParser( description=( 'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned' ' Distillation' ) ) parser.add_argument('--model_type', default='bert', choices=['bert']) parser.add_argument('--model_name', default='bert-base-uncased', type=str) parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str) parser.add_argument('--vocab_transform', action='store_true') SCREAMING_SNAKE_CASE = parser.parse_args() if args.model_type == "bert": SCREAMING_SNAKE_CASE = BertForMaskedLM.from_pretrained(args.model_name) SCREAMING_SNAKE_CASE = 'bert' else: raise ValueError('args.model_type should be \"bert\".') SCREAMING_SNAKE_CASE = model.state_dict() SCREAMING_SNAKE_CASE = {} for w in ["word_embeddings", "position_embeddings"]: SCREAMING_SNAKE_CASE = state_dict[f"""{prefix}.embeddings.{w}.weight"""] for w in ["weight", "bias"]: SCREAMING_SNAKE_CASE = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""] SCREAMING_SNAKE_CASE = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: for w in ["weight", "bias"]: SCREAMING_SNAKE_CASE = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}""" ] SCREAMING_SNAKE_CASE = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}""" ] SCREAMING_SNAKE_CASE = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}""" ] SCREAMING_SNAKE_CASE = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}""" ] SCREAMING_SNAKE_CASE = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}""" ] SCREAMING_SNAKE_CASE = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}""" ] SCREAMING_SNAKE_CASE = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}""" ] SCREAMING_SNAKE_CASE = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}""" ] std_idx += 1 SCREAMING_SNAKE_CASE = state_dict['cls.predictions.decoder.weight'] SCREAMING_SNAKE_CASE = state_dict['cls.predictions.bias'] if args.vocab_transform: for w in ["weight", "bias"]: SCREAMING_SNAKE_CASE = state_dict[f"""cls.predictions.transform.dense.{w}"""] SCREAMING_SNAKE_CASE = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""] print(f"""N layers selected for distillation: {std_idx}""") print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""") print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""") torch.save(compressed_sd, args.dump_checkpoint)
94
import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = 42 lowercase_ = jnp.floataa lowercase_ = True def SCREAMING_SNAKE_CASE_ (self : Any) ->List[str]: '''simple docstring''' super().setup() lowerCamelCase__: int =nn.Dense(5 , dtype=self.dtype) def __call__(self : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Any) ->Optional[int]: '''simple docstring''' lowerCamelCase__: Optional[Any] =super().__call__(*UpperCAmelCase_ , **UpperCAmelCase_) lowerCamelCase__: int =self.cls(outputs[2]) return outputs[:2] + (cls_out,) class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = FlaxBigBirdForNaturalQuestionsModule def lowerCAmelCase_ ( __a , __a , __a , __a , __a , __a ) -> Tuple: """simple docstring""" def cross_entropy(__a , __a , __a=None ): lowerCamelCase__: Tuple =logits.shape[-1] lowerCamelCase__: Tuple =(labels[..., None] == jnp.arange(__a )[None]).astype("f4" ) lowerCamelCase__: str =jax.nn.log_softmax(__a , axis=-1 ) lowerCamelCase__: Optional[Any] =-jnp.sum(labels * logits , axis=-1 ) if reduction is not None: lowerCamelCase__: Optional[Any] =reduction(__a ) return loss lowerCamelCase__: str =partial(__a , reduction=jnp.mean ) lowerCamelCase__: str =cross_entropy(__a , __a ) lowerCamelCase__: Optional[int] =cross_entropy(__a , __a ) lowerCamelCase__: Optional[Any] =cross_entropy(__a , __a ) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class _SCREAMING_SNAKE_CASE : '''simple docstring''' lowercase_ = "google/bigbird-roberta-base" lowercase_ = 3000 lowercase_ = 1_0500 lowercase_ = 128 lowercase_ = 3 lowercase_ = 1 lowercase_ = 5 # tx_args lowercase_ = 3E-5 lowercase_ = 0.0 lowercase_ = 2_0000 lowercase_ = 0.0095 lowercase_ = "bigbird-roberta-natural-questions" lowercase_ = "training-expt" lowercase_ = "data/nq-training.jsonl" lowercase_ = "data/nq-validation.jsonl" def SCREAMING_SNAKE_CASE_ (self : Tuple) ->List[str]: '''simple docstring''' os.makedirs(self.base_dir , exist_ok=UpperCAmelCase_) lowerCamelCase__: Optional[Any] =os.path.join(self.base_dir , self.save_dir) lowerCamelCase__: List[str] =self.batch_size_per_device * jax.device_count() @dataclass class _SCREAMING_SNAKE_CASE : '''simple docstring''' lowercase_ = 42 lowercase_ = 4096 # no dynamic padding on TPUs def __call__(self : List[Any] , UpperCAmelCase_ : Optional[Any]) ->Optional[int]: '''simple docstring''' lowerCamelCase__: Optional[Any] =self.collate_fn(UpperCAmelCase_) lowerCamelCase__: List[Any] =jax.tree_util.tree_map(UpperCAmelCase_ , UpperCAmelCase_) return batch def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : List[str]) ->List[Any]: '''simple docstring''' lowerCamelCase__ , lowerCamelCase__: List[Any] =self.fetch_inputs(features["input_ids"]) lowerCamelCase__: Union[str, Any] ={ "input_ids": jnp.array(UpperCAmelCase_ , dtype=jnp.intaa), "attention_mask": jnp.array(UpperCAmelCase_ , dtype=jnp.intaa), "start_labels": jnp.array(features["start_token"] , dtype=jnp.intaa), "end_labels": jnp.array(features["end_token"] , dtype=jnp.intaa), "pooled_labels": jnp.array(features["category"] , dtype=jnp.intaa), } return batch def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : list) ->Optional[Any]: '''simple docstring''' lowerCamelCase__: Tuple =[self._fetch_inputs(UpperCAmelCase_) for ids in input_ids] return zip(*UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : list) ->Any: '''simple docstring''' lowerCamelCase__: Optional[Any] =[1 for _ in range(len(UpperCAmelCase_))] while len(UpperCAmelCase_) < self.max_length: input_ids.append(self.pad_id) attention_mask.append(0) return input_ids, attention_mask def lowerCAmelCase_ ( __a , __a , __a=None ) -> str: """simple docstring""" if seed is not None: lowerCamelCase__: Any =dataset.shuffle(seed=__a ) for i in range(len(__a ) // batch_size ): lowerCamelCase__: Any =dataset[i * batch_size : (i + 1) * batch_size] yield dict(__a ) @partial(jax.pmap , axis_name="batch" ) def lowerCAmelCase_ ( __a , __a , **__a ) -> List[str]: """simple docstring""" def loss_fn(__a ): lowerCamelCase__: Optional[int] =model_inputs.pop("start_labels" ) lowerCamelCase__: int =model_inputs.pop("end_labels" ) lowerCamelCase__: List[str] =model_inputs.pop("pooled_labels" ) lowerCamelCase__: Optional[int] =state.apply_fn(**__a , params=__a , dropout_rng=__a , train=__a ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: List[Any] =outputs return state.loss_fn( __a , __a , __a , __a , __a , __a , ) lowerCamelCase__ , lowerCamelCase__: int =jax.random.split(__a ) lowerCamelCase__: Optional[Any] =jax.value_and_grad(__a ) lowerCamelCase__ , lowerCamelCase__: List[str] =grad_fn(state.params ) lowerCamelCase__: Optional[Any] =jax.lax.pmean({"loss": loss} , axis_name="batch" ) lowerCamelCase__: List[str] =jax.lax.pmean(__a , "batch" ) lowerCamelCase__: List[str] =state.apply_gradients(grads=__a ) return state, metrics, new_drp_rng @partial(jax.pmap , axis_name="batch" ) def lowerCAmelCase_ ( __a , **__a ) -> List[Any]: """simple docstring""" lowerCamelCase__: int =model_inputs.pop("start_labels" ) lowerCamelCase__: List[str] =model_inputs.pop("end_labels" ) lowerCamelCase__: int =model_inputs.pop("pooled_labels" ) lowerCamelCase__: Optional[Any] =state.apply_fn(**__a , params=state.params , train=__a ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: List[str] =outputs lowerCamelCase__: Optional[int] =state.loss_fn(__a , __a , __a , __a , __a , __a ) lowerCamelCase__: Optional[Any] =jax.lax.pmean({"loss": loss} , axis_name="batch" ) return metrics class _SCREAMING_SNAKE_CASE ( train_state.TrainState ): '''simple docstring''' lowercase_ = struct.field(pytree_node=__SCREAMING_SNAKE_CASE ) @dataclass class _SCREAMING_SNAKE_CASE : '''simple docstring''' lowercase_ = 42 lowercase_ = 42 lowercase_ = 42 lowercase_ = 42 lowercase_ = 42 lowercase_ = 42 lowercase_ = None def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int=None) ->Optional[int]: '''simple docstring''' lowerCamelCase__: Dict =model.params lowerCamelCase__: Tuple =TrainState.create( apply_fn=model.__call__ , params=UpperCAmelCase_ , tx=UpperCAmelCase_ , loss_fn=UpperCAmelCase_ , ) if ckpt_dir is not None: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Any =restore_checkpoint(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: Tuple ={ "lr": args.lr, "init_lr": args.init_lr, "warmup_steps": args.warmup_steps, "num_train_steps": num_train_steps, "weight_decay": args.weight_decay, } lowerCamelCase__ , lowerCamelCase__: List[Any] =build_tx(**UpperCAmelCase_) lowerCamelCase__: str =train_state.TrainState( step=UpperCAmelCase_ , apply_fn=model.__call__ , params=UpperCAmelCase_ , tx=UpperCAmelCase_ , opt_state=UpperCAmelCase_ , ) lowerCamelCase__: Tuple =args lowerCamelCase__: Tuple =data_collator lowerCamelCase__: str =lr lowerCamelCase__: Dict =params lowerCamelCase__: List[str] =jax_utils.replicate(UpperCAmelCase_) return state def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple) ->Optional[Any]: '''simple docstring''' lowerCamelCase__: Tuple =self.args lowerCamelCase__: Any =len(UpperCAmelCase_) // args.batch_size lowerCamelCase__: List[str] =jax.random.PRNGKey(0) lowerCamelCase__: Optional[Any] =jax.random.split(UpperCAmelCase_ , jax.device_count()) for epoch in range(args.max_epochs): lowerCamelCase__: Union[str, Any] =jnp.array(0 , dtype=jnp.floataa) lowerCamelCase__: str =get_batched_dataset(UpperCAmelCase_ , args.batch_size , seed=UpperCAmelCase_) lowerCamelCase__: Dict =0 for batch in tqdm(UpperCAmelCase_ , total=UpperCAmelCase_ , desc=F"""Running EPOCH-{epoch}"""): lowerCamelCase__: List[str] =self.data_collator(UpperCAmelCase_) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[int] =self.train_step_fn(UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_) running_loss += jax_utils.unreplicate(metrics["loss"]) i += 1 if i % args.logging_steps == 0: lowerCamelCase__: Optional[int] =jax_utils.unreplicate(state.step) lowerCamelCase__: List[Any] =running_loss.item() / i lowerCamelCase__: Tuple =self.scheduler_fn(state_step - 1) lowerCamelCase__: Union[str, Any] =self.evaluate(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: Dict ={ "step": state_step.item(), "eval_loss": eval_loss.item(), "tr_loss": tr_loss, "lr": lr.item(), } tqdm.write(str(UpperCAmelCase_)) self.logger.log(UpperCAmelCase_ , commit=UpperCAmelCase_) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + F"""-e{epoch}-s{i}""" , state=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : str) ->Any: '''simple docstring''' lowerCamelCase__: List[Any] =get_batched_dataset(UpperCAmelCase_ , self.args.batch_size) lowerCamelCase__: List[str] =len(UpperCAmelCase_) // self.args.batch_size lowerCamelCase__: str =jnp.array(0 , dtype=jnp.floataa) lowerCamelCase__: Optional[Any] =0 for batch in tqdm(UpperCAmelCase_ , total=UpperCAmelCase_ , desc="Evaluating ... "): lowerCamelCase__: int =self.data_collator(UpperCAmelCase_) lowerCamelCase__: str =self.val_step_fn(UpperCAmelCase_ , **UpperCAmelCase_) running_loss += jax_utils.unreplicate(metrics["loss"]) i += 1 return running_loss / i def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int]) ->int: '''simple docstring''' lowerCamelCase__: Any =jax_utils.unreplicate(UpperCAmelCase_) print(F"""SAVING CHECKPOINT IN {save_dir}""" , end=" ... ") self.model_save_fn(UpperCAmelCase_ , params=state.params) with open(os.path.join(UpperCAmelCase_ , "opt_state.msgpack") , "wb") as f: f.write(to_bytes(state.opt_state)) joblib.dump(self.args , os.path.join(UpperCAmelCase_ , "args.joblib")) joblib.dump(self.data_collator , os.path.join(UpperCAmelCase_ , "data_collator.joblib")) with open(os.path.join(UpperCAmelCase_ , "training_state.json") , "w") as f: json.dump({"step": state.step.item()} , UpperCAmelCase_) print("DONE") def lowerCAmelCase_ ( __a , __a ) -> str: """simple docstring""" print(F"""RESTORING CHECKPOINT FROM {save_dir}""" , end=" ... " ) with open(os.path.join(__a , "flax_model.msgpack" ) , "rb" ) as f: lowerCamelCase__: Tuple =from_bytes(state.params , f.read() ) with open(os.path.join(__a , "opt_state.msgpack" ) , "rb" ) as f: lowerCamelCase__: Optional[int] =from_bytes(state.opt_state , f.read() ) lowerCamelCase__: Any =joblib.load(os.path.join(__a , "args.joblib" ) ) lowerCamelCase__: Union[str, Any] =joblib.load(os.path.join(__a , "data_collator.joblib" ) ) with open(os.path.join(__a , "training_state.json" ) , "r" ) as f: lowerCamelCase__: Optional[Any] =json.load(__a ) lowerCamelCase__: Any =training_state["step"] print("DONE" ) return params, opt_state, step, args, data_collator def lowerCAmelCase_ ( __a , __a , __a , __a ) -> Optional[int]: """simple docstring""" lowerCamelCase__: int =num_train_steps - warmup_steps lowerCamelCase__: str =optax.linear_schedule(init_value=__a , end_value=__a , transition_steps=__a ) lowerCamelCase__: Optional[Any] =optax.linear_schedule(init_value=__a , end_value=1e-7 , transition_steps=__a ) lowerCamelCase__: List[Any] =optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] ) return lr def lowerCAmelCase_ ( __a , __a , __a , __a , __a ) -> str: """simple docstring""" def weight_decay_mask(__a ): lowerCamelCase__: List[str] =traverse_util.flatten_dict(__a ) lowerCamelCase__: List[str] ={k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()} return traverse_util.unflatten_dict(__a ) lowerCamelCase__: Optional[Any] =scheduler_fn(__a , __a , __a , __a ) lowerCamelCase__: Tuple =optax.adamw(learning_rate=__a , weight_decay=__a , mask=__a ) return tx, lr
59
0
"""simple docstring""" import unittest from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin A_ = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece class lowercase( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' lowercase__ = XLMProphetNetTokenizer lowercase__ = False lowercase__ = True def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing _snake_case : Any = XLMProphetNetTokenizer(UpperCAmelCase_, keep_accents=UpperCAmelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case : List[Any] = "[PAD]" _snake_case : Tuple = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ), UpperCAmelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ), UpperCAmelCase_ ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : List[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0], """[PAD]""" ) self.assertEqual(vocab_keys[1], """[CLS]""" ) self.assertEqual(vocab_keys[-1], """j""" ) self.assertEqual(len(UpperCAmelCase_ ), 1_012 ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size, 1_012 ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : Optional[Any] = XLMProphetNetTokenizer(UpperCAmelCase_, keep_accents=UpperCAmelCase_ ) _snake_case : Tuple = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(UpperCAmelCase_, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], ) _snake_case : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCAmelCase_, [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ], ) _snake_case : Any = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) self.assertListEqual( UpperCAmelCase_, [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4] ], ) _snake_case : Any = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ ) self.assertListEqual( UpperCAmelCase_, [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """[UNK]""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """[UNK]""", """.""", ], ) @cached_property def UpperCamelCase_ ( self: Any ): '''simple docstring''' return XLMProphetNetTokenizer.from_pretrained("""microsoft/xprophetnet-large-wiki100-cased""" ) @slow def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Optional[int] = "Hello World!" _snake_case : Dict = [35_389, 6_672, 49, 2] self.assertListEqual(UpperCAmelCase_, self.big_tokenizer.encode(UpperCAmelCase_ ) ) @slow def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Any = {"input_ids": [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase_, model_name="""microsoft/xprophetnet-large-wiki100-cased""", revision="""1acad1643ddd54a44df6a1b797ada8373685d90e""", )
609
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = ["image_processor", "tokenizer"] lowercase_ = "ChineseCLIPImageProcessor" lowercase_ = ("BertTokenizer", "BertTokenizerFast") def __init__(self : Any , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Union[str, Any]=None , **UpperCAmelCase_ : str) ->Dict: '''simple docstring''' lowerCamelCase__: str =None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , UpperCAmelCase_ , ) lowerCamelCase__: Tuple =kwargs.pop("feature_extractor") lowerCamelCase__: Optional[int] =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") super().__init__(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: Optional[int] =self.image_processor def __call__(self : int , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : List[Any]=None , **UpperCAmelCase_ : Dict) ->Optional[int]: '''simple docstring''' if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be none.") if text is not None: lowerCamelCase__: Dict =self.tokenizer(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_) if images is not None: lowerCamelCase__: List[str] =self.image_processor(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_) if text is not None and images is not None: lowerCamelCase__: Union[str, Any] =image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCAmelCase_) , tensor_type=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int) ->str: '''simple docstring''' return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Any , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[Any]) ->Dict: '''simple docstring''' return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_) @property def SCREAMING_SNAKE_CASE_ (self : int) ->List[str]: '''simple docstring''' lowerCamelCase__: str =self.tokenizer.model_input_names lowerCamelCase__: Union[str, Any] =self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) @property def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->str: '''simple docstring''' warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCAmelCase_ , ) return self.image_processor_class
59
0
import math def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str ): """simple docstring""" if ( not isinstance(__a , (int, float) ) or power_factor < -1 or power_factor > 1 ): raise ValueError("power_factor must be a valid float value between -1 and 1." ) return apparent_power * power_factor def _snake_case ( lowerCAmelCase : List[str] , lowerCAmelCase : List[str] ): """simple docstring""" if ( not isinstance(__a , (int, float) ) or power_factor < -1 or power_factor > 1 ): raise ValueError("power_factor must be a valid float value between -1 and 1." ) return apparent_power * math.sqrt(1 - power_factor**2 ) if __name__ == "__main__": import doctest doctest.testmod()
216
from math import ceil, sqrt def lowerCAmelCase_ ( __a = 1000000 ) -> int: """simple docstring""" lowerCamelCase__: Any =0 for outer_width in range(3 , (limit // 4) + 2 ): if outer_width**2 > limit: lowerCamelCase__: Optional[int] =max(ceil(sqrt(outer_width**2 - limit ) ) , 1 ) else: lowerCamelCase__: Tuple =1 if (outer_width - hole_width_lower_bound) % 2: hole_width_lower_bound += 1 answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1 return answer if __name__ == "__main__": print(f'{solution() = }')
59
0
"""simple docstring""" import argparse import torch from torch import nn from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration def _UpperCamelCase ( UpperCamelCase ) -> Any: """simple docstring""" __UpperCAmelCase : List[Any] = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "decoder.output_projection.weight", "_float_tensor", "encoder.embed_positions._float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(__a , __a ) def _UpperCamelCase ( UpperCamelCase ) -> List[str]: """simple docstring""" __UpperCAmelCase : str = emb.weight.shape __UpperCAmelCase : Optional[Any] = nn.Linear(__a , __a , bias=__a ) __UpperCAmelCase : str = emb.weight.data return lin_layer def _UpperCamelCase ( UpperCamelCase ) -> Any: """simple docstring""" __UpperCAmelCase : str = torch.load(__a , map_location="cpu" ) __UpperCAmelCase : List[Any] = mam_aaa["args"] or mam_aaa["cfg"]["model"] __UpperCAmelCase : Dict = mam_aaa["model"] remove_ignore_keys_(__a ) __UpperCAmelCase : Any = state_dict["encoder.embed_tokens.weight"].shape[0] __UpperCAmelCase : Dict = MaMaaaConfig( vocab_size=__a , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , ) __UpperCAmelCase : Tuple = state_dict["decoder.embed_tokens.weight"] __UpperCAmelCase : Optional[Any] = MaMaaaForConditionalGeneration(__a ) model.model.load_state_dict(__a , strict=__a ) __UpperCAmelCase : Optional[Any] = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": A = argparse.ArgumentParser() # Required parameters parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""") parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") A = parser.parse_args() A = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_pathß) model.save_pretrained(args.pytorch_dump_folder_path)
77
def lowerCAmelCase_ ( __a = 50000000 ) -> int: """simple docstring""" lowerCamelCase__: Any =set() lowerCamelCase__: int =int((limit - 24) ** (1 / 2) ) lowerCamelCase__: Tuple =set(range(3 , prime_square_limit + 1 , 2 ) ) primes.add(2 ) for p in range(3 , prime_square_limit + 1 , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , prime_square_limit + 1 , __a ) ) ) for primea in primes: lowerCamelCase__: Optional[int] =primea * primea for primea in primes: lowerCamelCase__: List[str] =primea * primea * primea if square + cube >= limit - 16: break for primea in primes: lowerCamelCase__: int =primea * primea * primea * primea lowerCamelCase__: Optional[Any] =square + cube + tetr if total >= limit: break ret.add(__a ) return len(__a ) if __name__ == "__main__": print(f'{solution() = }')
59
0
"""simple docstring""" import random from typing import Any def _snake_case ( lowercase__ ): for _ in range(len(__a ) ): _lowerCamelCase : Tuple = random.randint(0 , len(__a ) - 1 ) _lowerCamelCase : str = random.randint(0 , len(__a ) - 1 ) _lowerCamelCase : Optional[int] = data[b], data[a] return data if __name__ == "__main__": lowercase__ = [0, 1, 2, 3, 4, 5, 6, 7] lowercase__ = ["""python""", """says""", """hello""", """!"""] print("""Fisher-Yates Shuffle:""") print("""List""", integers, strings) print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
630
from __future__ import annotations from decimal import Decimal from math import * # noqa: F403 from sympy import diff def lowerCAmelCase_ ( __a , __a , __a = 10**-10 ) -> float: """simple docstring""" lowerCamelCase__: List[str] =a while True: lowerCamelCase__: Optional[Any] =Decimal(__a ) - ( Decimal(eval(__a ) ) / Decimal(eval(str(diff(__a ) ) ) ) # noqa: S307 ) # This number dictates the accuracy of the answer if abs(eval(__a ) ) < precision: # noqa: S307 return float(__a ) # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}') # Find root of polynomial print(f'The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}') # Find Square Root of 5 print(f'The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}') # Exponential Roots print(f'The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}')
59
0
def lowerCamelCase ( a_ ) -> list: for i in range(len(__a ) - 1 , 0 , -1 ): lowerCAmelCase_ = False for j in range(__a , 0 , -1 ): if unsorted[j] < unsorted[j - 1]: lowerCAmelCase_ = unsorted[j - 1], unsorted[j] lowerCAmelCase_ = True for j in range(__a ): if unsorted[j] > unsorted[j + 1]: lowerCAmelCase_ = unsorted[j + 1], unsorted[j] lowerCAmelCase_ = True if not swapped: break return unsorted if __name__ == "__main__": import doctest doctest.testmod() lowerCamelCase_ = input("""Enter numbers separated by a comma:\n""").strip() lowerCamelCase_ = [int(item) for item in user_input.split(""",""")] print(f'''{cocktail_shaker_sort(unsorted) = }''')
318
import numpy as np from numpy import ndarray from scipy.optimize import Bounds, LinearConstraint, minimize def lowerCAmelCase_ ( __a ) -> float: """simple docstring""" return np.dot(__a , __a ) class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__(self : List[str] , *, UpperCAmelCase_ : float = np.inf , UpperCAmelCase_ : str = "linear" , UpperCAmelCase_ : float = 0.0 , ) ->None: '''simple docstring''' lowerCamelCase__: Dict =regularization lowerCamelCase__: Any =gamma if kernel == "linear": lowerCamelCase__: Dict =self.__linear elif kernel == "rbf": if self.gamma == 0: raise ValueError("rbf kernel requires gamma") if not isinstance(self.gamma , (float, int)): raise ValueError("gamma must be float or int") if not self.gamma > 0: raise ValueError("gamma must be > 0") lowerCamelCase__: Tuple =self.__rbf # in the future, there could be a default value like in sklearn # sklear: def_gamma = 1/(n_features * X.var()) (wiki) # previously it was 1/(n_features) else: lowerCamelCase__: Optional[Any] =F"""Unknown kernel: {kernel}""" raise ValueError(UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : ndarray , UpperCAmelCase_ : ndarray) ->float: '''simple docstring''' return np.dot(UpperCAmelCase_ , UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : ndarray , UpperCAmelCase_ : ndarray) ->float: '''simple docstring''' return np.exp(-(self.gamma * norm_squared(vectora - vectora))) def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : list[ndarray] , UpperCAmelCase_ : ndarray) ->None: '''simple docstring''' lowerCamelCase__: Optional[Any] =observations lowerCamelCase__: Optional[int] =classes # using Wolfe's Dual to calculate w. # Primal problem: minimize 1/2*norm_squared(w) # constraint: yn(w . xn + b) >= 1 # # With l a vector # Dual problem: maximize sum_n(ln) - # 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm)) # constraint: self.C >= ln >= 0 # and sum_n(ln*yn) = 0 # Then we get w using w = sum_n(ln*yn*xn) # At the end we can get b ~= mean(yn - w . xn) # # Since we use kernels, we only need l_star to calculate b # and to classify observations ((lowerCamelCase__) , ): List[str] =np.shape(UpperCAmelCase_) def to_minimize(UpperCAmelCase_ : ndarray) -> float: lowerCamelCase__: int =0 ((lowerCamelCase__) , ): Optional[Any] =np.shape(UpperCAmelCase_) for i in range(UpperCAmelCase_): for j in range(UpperCAmelCase_): s += ( candidate[i] * candidate[j] * classes[i] * classes[j] * self.kernel(observations[i] , observations[j]) ) return 1 / 2 * s - sum(UpperCAmelCase_) lowerCamelCase__: List[Any] =LinearConstraint(UpperCAmelCase_ , 0 , 0) lowerCamelCase__: str =Bounds(0 , self.regularization) lowerCamelCase__: Union[str, Any] =minimize( UpperCAmelCase_ , np.ones(UpperCAmelCase_) , bounds=UpperCAmelCase_ , constraints=[ly_contraint]).x lowerCamelCase__: str =l_star # calculating mean offset of separation plane to points lowerCamelCase__: Tuple =0 for i in range(UpperCAmelCase_): for j in range(UpperCAmelCase_): s += classes[i] - classes[i] * self.optimum[i] * self.kernel( observations[i] , observations[j]) lowerCamelCase__: int =s / n def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : ndarray) ->int: '''simple docstring''' lowerCamelCase__: Optional[Any] =sum( self.optimum[n] * self.classes[n] * self.kernel(self.observations[n] , UpperCAmelCase_) for n in range(len(self.classes))) return 1 if s + self.offset >= 0 else -1 if __name__ == "__main__": import doctest doctest.testmod()
59
0
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase ( __SCREAMING_SNAKE_CASE ): lowercase : List[str] = ['image_processor', 'tokenizer'] lowercase : Any = 'ChineseCLIPImageProcessor' lowercase : Optional[int] = ('BertTokenizer', 'BertTokenizerFast') def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ ): UpperCamelCase : str = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , UpperCAmelCase_ , ) UpperCamelCase : Tuple = kwargs.pop("""feature_extractor""" ) UpperCamelCase : Optional[int] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(UpperCAmelCase_ , UpperCAmelCase_ ) UpperCamelCase : Optional[int] = self.image_processor def __call__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ ): if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: UpperCamelCase : Dict = self.tokenizer(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ ) if images is not None: UpperCamelCase : List[str] = self.image_processor(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ ) if text is not None and images is not None: UpperCamelCase : Union[str, Any] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCAmelCase_ ) , tensor_type=UpperCAmelCase_ ) def a_ ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ ) def a_ ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ ) @property def a_ ( self ): UpperCamelCase : str = self.tokenizer.model_input_names UpperCamelCase : Union[str, Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def a_ ( self ): warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , UpperCAmelCase_ , ) return self.image_processor_class
499
import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy __A = logging.getLogger(__name__) def lowerCAmelCase_ ( __a , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = False , ) -> str: """simple docstring""" lowerCamelCase__: int =bnb_quantization_config.load_in_abit lowerCamelCase__: Any =bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( "You have a version of `bitsandbytes` that is not compatible with 8bit quantization," " make sure you have the latest version of `bitsandbytes` installed." ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( "You have a version of `bitsandbytes` that is not compatible with 4bit quantization," "make sure you have the latest version of `bitsandbytes` installed." ) lowerCamelCase__: List[Any] =[] # custom device map if isinstance(__a , __a ) and len(device_map.keys() ) > 1: lowerCamelCase__: Optional[int] =[key for key, value in device_map.items() if value in ["disk", "cpu"]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: lowerCamelCase__: Any =get_keys_to_not_convert(__a ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(__a ) lowerCamelCase__: List[str] =bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: lowerCamelCase__: List[Any] =[] lowerCamelCase__: int =bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(__a ) # compatibility with peft lowerCamelCase__: List[str] =load_in_abit lowerCamelCase__: int =load_in_abit lowerCamelCase__: Tuple =get_parameter_device(__a ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( "It is not recommended to quantize a loaded model. " "The model should be instantiated under the `init_empty_weights` context manager." ) lowerCamelCase__: Tuple =replace_with_bnb_layers(__a , __a , modules_to_not_convert=__a ) # convert param to the right dtype lowerCamelCase__: Dict =bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: lowerCamelCase__: str =name.replace(".weight" , "" ).replace(".bias" , "" ) lowerCamelCase__: Optional[Any] =getattr(__a , __a , __a ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(__a ): param.to(__a ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError("No GPU found. A GPU is needed for quantization." ) logger.info( F"""The model device type is {model_device.type}. However, cuda is needed for quantization.""" "We move the model to cuda." ) return model elif weights_location is None: raise RuntimeError( F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ ) else: with init_empty_weights(): lowerCamelCase__: str =replace_with_bnb_layers( __a , __a , modules_to_not_convert=__a ) lowerCamelCase__: Optional[Any] =get_quantized_model_device_map( __a , __a , __a , max_memory=__a , no_split_module_classes=__a , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): lowerCamelCase__: Any =True lowerCamelCase__: List[str] =any(x in list(device_map.values() ) for x in ["cpu", "disk"] ) load_checkpoint_in_model( __a , __a , __a , dtype=bnb_quantization_config.torch_dtype , offload_folder=__a , offload_state_dict=__a , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(__a , device_map=__a , offload_dir=__a ) def lowerCAmelCase_ ( __a , __a , __a=None , __a=None , __a=None ) -> str: """simple docstring""" if device_map is None: if torch.cuda.is_available(): lowerCamelCase__: str ={"": torch.cuda.current_device()} else: raise RuntimeError("No GPU found. A GPU is needed for quantization." ) logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`." ) if isinstance(__a , __a ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or " "'sequential'." ) lowerCamelCase__: Optional[int] ={} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) lowerCamelCase__: Optional[Any] ={} lowerCamelCase__: str =special_dtypes lowerCamelCase__: List[str] =no_split_module_classes lowerCamelCase__: Dict =bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": lowerCamelCase__: Optional[Any] =get_balanced_memory( __a , low_zero=(device_map == "balanced_low_0") , max_memory=__a , **__a , ) lowerCamelCase__: Union[str, Any] =max_memory lowerCamelCase__: Dict =infer_auto_device_map(__a , **__a ) if isinstance(__a , __a ): # check if don't have any quantized module on the cpu lowerCamelCase__: Union[str, Any] =bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules lowerCamelCase__: List[Any] ={ key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( "\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n " ) else: logger.info( "Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit" ) del device_map_without_some_modules return device_map def lowerCAmelCase_ ( __a , __a , __a=None , __a=None ) -> Optional[Any]: """simple docstring""" if modules_to_not_convert is None: lowerCamelCase__: List[Any] =[] lowerCamelCase__ , lowerCamelCase__: Any =_replace_with_bnb_layers( __a , __a , __a , __a ) if not has_been_replaced: logger.warning( "You are loading your model in 8bit or 4bit but no linear modules were found in your model." " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers." " Please double check your model architecture, or submit an issue on github if you think this is" " a bug." ) return model def lowerCAmelCase_ ( __a , __a , __a=None , __a=None , ) -> List[Any]: """simple docstring""" lowerCamelCase__: Optional[int] =False for name, module in model.named_children(): if current_key_name is None: lowerCamelCase__: Optional[Any] =[] current_key_name.append(__a ) if isinstance(__a , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` lowerCamelCase__: List[str] =".".join(__a ) lowerCamelCase__: Optional[Any] =True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: lowerCamelCase__: int =False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: lowerCamelCase__: Optional[int] =bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=__a , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: lowerCamelCase__: Dict =bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError("load_in_8bit and load_in_4bit can't be both False" ) lowerCamelCase__: Dict =module.weight.data if module.bias is not None: lowerCamelCase__: List[Any] =module.bias.data bnb_module.requires_grad_(__a ) setattr(__a , __a , __a ) lowerCamelCase__: int =True if len(list(module.children() ) ) > 0: lowerCamelCase__ , lowerCamelCase__: List[str] =_replace_with_bnb_layers( __a , __a , __a , __a ) lowerCamelCase__: Union[str, Any] =has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def lowerCAmelCase_ ( __a ) -> List[Any]: """simple docstring""" with init_empty_weights(): lowerCamelCase__: Any =deepcopy(__a ) # this has 0 cost since it is done inside `init_empty_weights` context manager` lowerCamelCase__: str =find_tied_parameters(__a ) # For compatibility with Accelerate < 0.18 if isinstance(__a , __a ): lowerCamelCase__: int =sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: lowerCamelCase__: str =sum(__a , [] ) lowerCamelCase__: str =len(__a ) > 0 # Check if it is a base model lowerCamelCase__: Optional[Any] =False if hasattr(__a , "base_model_prefix" ): lowerCamelCase__: Union[str, Any] =not hasattr(__a , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head lowerCamelCase__: Optional[int] =list(model.named_children() ) lowerCamelCase__: Optional[int] =[list_modules[-1][0]] # add last module together with tied weights lowerCamelCase__: Union[str, Any] =set(__a ) - set(__a ) lowerCamelCase__: List[str] =list(set(__a ) ) + list(__a ) # remove ".weight" from the keys lowerCamelCase__: List[Any] =[".weight", ".bias"] lowerCamelCase__: Tuple =[] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: lowerCamelCase__: Optional[Any] =name.replace(__a , "" ) filtered_module_names.append(__a ) return filtered_module_names def lowerCAmelCase_ ( __a ) -> Tuple: """simple docstring""" for m in model.modules(): if isinstance(__a , bnb.nn.Linearabit ): return True return False def lowerCAmelCase_ ( __a ) -> List[str]: """simple docstring""" return next(parameter.parameters() ).device def lowerCAmelCase_ ( __a , __a , __a , __a , __a , __a , __a ) -> Any: """simple docstring""" if fpaa_statistics is None: set_module_tensor_to_device(__a , __a , 0 , dtype=__a , value=__a ) lowerCamelCase__: Dict =param_name lowerCamelCase__: Tuple =model if "." in tensor_name: lowerCamelCase__: Any =tensor_name.split("." ) for split in splits[:-1]: lowerCamelCase__: Any =getattr(__a , __a ) if new_module is None: raise ValueError(F"""{module} has no attribute {split}.""" ) lowerCamelCase__: str =new_module lowerCamelCase__: int =splits[-1] # offload weights lowerCamelCase__: str =False offload_weight(module._parameters[tensor_name] , __a , __a , index=__a ) if hasattr(module._parameters[tensor_name] , "SCB" ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace("weight" , "SCB" ) , __a , index=__a , ) else: offload_weight(__a , __a , __a , index=__a ) offload_weight(__a , param_name.replace("weight" , "SCB" ) , __a , index=__a ) set_module_tensor_to_device(__a , __a , "meta" , dtype=__a , value=torch.empty(*param.size() ) )
59
0
"""simple docstring""" from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class a ( __SCREAMING_SNAKE_CASE ): def __init__( self , _snake_case , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = False , _snake_case = False , _snake_case = None , **_snake_case , ): """simple docstring""" super().__init__( UpperCAmelCase_ , split=UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , keep_in_memory=UpperCAmelCase_ , streaming=UpperCAmelCase_ , num_proc=UpperCAmelCase_ , **UpperCAmelCase_ , ) lowerCAmelCase = path_or_paths if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else {self.split: path_or_paths} lowerCAmelCase = Text( cache_dir=UpperCAmelCase_ , data_files=UpperCAmelCase_ , features=UpperCAmelCase_ , **UpperCAmelCase_ , ) def UpperCamelCase__ ( self ): """simple docstring""" if self.streaming: lowerCAmelCase = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None self.builder.download_and_prepare( download_config=UpperCAmelCase_ , download_mode=UpperCAmelCase_ , verification_mode=UpperCAmelCase_ , base_path=UpperCAmelCase_ , num_proc=self.num_proc , ) lowerCAmelCase = self.builder.as_dataset( split=self.split , verification_mode=UpperCAmelCase_ , in_memory=self.keep_in_memory ) return dataset
4
from __future__ import annotations from math import pi def lowerCAmelCase_ ( __a , __a , __a ) -> dict[str, float]: """simple docstring""" if (inductance, frequency, reactance).count(0 ) != 1: raise ValueError("One and only one argument must be 0" ) if inductance < 0: raise ValueError("Inductance cannot be negative" ) if frequency < 0: raise ValueError("Frequency cannot be negative" ) if reactance < 0: raise ValueError("Inductive reactance cannot be negative" ) if inductance == 0: return {"inductance": reactance / (2 * pi * frequency)} elif frequency == 0: return {"frequency": reactance / (2 * pi * inductance)} elif reactance == 0: return {"reactance": 2 * pi * frequency * inductance} else: raise ValueError("Exactly one argument must be 0" ) if __name__ == "__main__": import doctest doctest.testmod()
59
0
from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING __A =logging.get_logger(__name__) @add_end_docstrings(__SCREAMING_SNAKE_CASE ) class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): def __init__( self , **lowercase ) -> Any: super().__init__(**UpperCAmelCase_ ) requires_backends(self , "vision" ) requires_backends(self , "torch" ) if self.framework != "pt": raise ValueError(f'The {self.__class__} is only available in PyTorch.' ) self.check_model_type(UpperCAmelCase_ ) def SCREAMING_SNAKE_CASE_( self , **lowercase ) -> Tuple: lowerCamelCase_ = {} lowerCamelCase_ = {} lowerCamelCase_ = {} # preprocess args if "points_per_batch" in kwargs: lowerCamelCase_ = kwargs["points_per_batch"] if "points_per_crop" in kwargs: lowerCamelCase_ = kwargs["points_per_crop"] if "crops_n_layers" in kwargs: lowerCamelCase_ = kwargs["crops_n_layers"] if "crop_overlap_ratio" in kwargs: lowerCamelCase_ = kwargs["crop_overlap_ratio"] if "crop_n_points_downscale_factor" in kwargs: lowerCamelCase_ = kwargs["crop_n_points_downscale_factor"] # postprocess args if "pred_iou_thresh" in kwargs: lowerCamelCase_ = kwargs["pred_iou_thresh"] if "stability_score_offset" in kwargs: lowerCamelCase_ = kwargs["stability_score_offset"] if "mask_threshold" in kwargs: lowerCamelCase_ = kwargs["mask_threshold"] if "stability_score_thresh" in kwargs: lowerCamelCase_ = kwargs["stability_score_thresh"] if "crops_nms_thresh" in kwargs: lowerCamelCase_ = kwargs["crops_nms_thresh"] if "output_rle_mask" in kwargs: lowerCamelCase_ = kwargs["output_rle_mask"] if "output_bboxes_mask" in kwargs: lowerCamelCase_ = kwargs["output_bboxes_mask"] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__( self , lowercase , *lowercase , lowercase=None , lowercase=None , **lowercase ) -> Optional[Any]: return super().__call__(UpperCAmelCase_ , *UpperCAmelCase_ , num_workers=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , **UpperCAmelCase_ ) def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase=64 , lowercase = 0 , lowercase = 512 / 1500 , lowercase = 32 , lowercase = 1 , ) -> Dict: lowerCamelCase_ = load_image(UpperCAmelCase_ ) lowerCamelCase_ = self.image_processor.size["longest_edge"] lowerCamelCase_ = self.image_processor.generate_crop_boxes( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) lowerCamelCase_ = self.image_processor(images=UpperCAmelCase_ , return_tensors="pt" ) with self.device_placement(): if self.framework == "pt": lowerCamelCase_ = self.get_inference_context() with inference_context(): lowerCamelCase_ = self._ensure_tensor_on_device(UpperCAmelCase_ , device=self.device ) lowerCamelCase_ = self.model.get_image_embeddings(model_inputs.pop("pixel_values" ) ) lowerCamelCase_ = image_embeddings lowerCamelCase_ = grid_points.shape[1] lowerCamelCase_ = points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. " "To return all points at once, set points_per_batch to None" ) for i in range(0 , UpperCAmelCase_ , UpperCAmelCase_ ): lowerCamelCase_ = grid_points[:, i : i + points_per_batch, :, :] lowerCamelCase_ = input_labels[:, i : i + points_per_batch] lowerCamelCase_ = i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase=0.8_8 , lowercase=0.9_5 , lowercase=0 , lowercase=1 , ) -> Optional[Any]: lowerCamelCase_ = model_inputs.pop("input_boxes" ) lowerCamelCase_ = model_inputs.pop("is_last" ) lowerCamelCase_ = model_inputs.pop("original_sizes" ).tolist() lowerCamelCase_ = model_inputs.pop("reshaped_input_sizes" ).tolist() lowerCamelCase_ = self.model(**UpperCAmelCase_ ) # post processing happens here in order to avoid CPU GPU copies of ALL the masks lowerCamelCase_ = model_outputs["pred_masks"] lowerCamelCase_ = self.image_processor.post_process_masks( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , binarize=UpperCAmelCase_ ) lowerCamelCase_ = model_outputs["iou_scores"] lowerCamelCase_ = self.image_processor.filter_masks( masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase=False , lowercase=False , lowercase=0.7 , ) -> Tuple: lowerCamelCase_ = [] lowerCamelCase_ = [] lowerCamelCase_ = [] for model_output in model_outputs: all_scores.append(model_output.pop("iou_scores" ) ) all_masks.extend(model_output.pop("masks" ) ) all_boxes.append(model_output.pop("boxes" ) ) lowerCamelCase_ = torch.cat(UpperCAmelCase_ ) lowerCamelCase_ = torch.cat(UpperCAmelCase_ ) lowerCamelCase_ = self.image_processor.post_process_for_mask_generation( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) lowerCamelCase_ = defaultdict(UpperCAmelCase_ ) for output in model_outputs: for k, v in output.items(): extra[k].append(UpperCAmelCase_ ) lowerCamelCase_ = {} if output_rle_mask: lowerCamelCase_ = rle_mask if output_bboxes_mask: lowerCamelCase_ = bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
463
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCAmelCase_ ( __a , __a ) -> List[Any]: """simple docstring""" assert isinstance(__a , __a ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowerCAmelCase_ ( __a , __a , __a ) -> Any: """simple docstring""" lowerCamelCase__: Any =tmp_path / "cache" lowerCamelCase__: Optional[int] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCamelCase__: Tuple =ParquetDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read() _check_parquet_dataset(__a , __a ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowerCAmelCase_ ( __a , __a , __a ) -> List[str]: """simple docstring""" lowerCamelCase__: int =tmp_path / "cache" lowerCamelCase__: Tuple ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: Union[str, Any] =features.copy() if features else default_expected_features lowerCamelCase__: Optional[int] =( Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCamelCase__: int =ParquetDatasetReader(__a , features=__a , cache_dir=__a ).read() _check_parquet_dataset(__a , __a ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowerCAmelCase_ ( __a , __a , __a ) -> Any: """simple docstring""" lowerCamelCase__: Any =tmp_path / "cache" lowerCamelCase__: Optional[Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: Optional[Any] =ParquetDatasetReader(__a , cache_dir=__a , split=__a ).read() _check_parquet_dataset(__a , __a ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list] ) def lowerCAmelCase_ ( __a , __a , __a ) -> int: """simple docstring""" if issubclass(__a , __a ): lowerCamelCase__: List[Any] =parquet_path elif issubclass(__a , __a ): lowerCamelCase__: str =[parquet_path] lowerCamelCase__: Tuple =tmp_path / "cache" lowerCamelCase__: Optional[Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: int =ParquetDatasetReader(__a , cache_dir=__a ).read() _check_parquet_dataset(__a , __a ) def lowerCAmelCase_ ( __a , __a , __a=("train",) ) -> Dict: """simple docstring""" assert isinstance(__a , __a ) for split in splits: lowerCamelCase__: Tuple =dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowerCAmelCase_ ( __a , __a , __a ) -> Any: """simple docstring""" lowerCamelCase__: List[Any] =tmp_path / "cache" lowerCamelCase__: Optional[Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCamelCase__: Tuple =ParquetDatasetReader( {"train": parquet_path} , cache_dir=__a , keep_in_memory=__a ).read() _check_parquet_datasetdict(__a , __a ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowerCAmelCase_ ( __a , __a , __a ) -> Optional[Any]: """simple docstring""" lowerCamelCase__: Tuple =tmp_path / "cache" lowerCamelCase__: Optional[int] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: List[Any] =features.copy() if features else default_expected_features lowerCamelCase__: int =( Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCamelCase__: Optional[Any] =ParquetDatasetReader({"train": parquet_path} , features=__a , cache_dir=__a ).read() _check_parquet_datasetdict(__a , __a ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowerCAmelCase_ ( __a , __a , __a ) -> Union[str, Any]: """simple docstring""" if split: lowerCamelCase__: Any ={split: parquet_path} else: lowerCamelCase__: int ="train" lowerCamelCase__: Any ={"train": parquet_path, "test": parquet_path} lowerCamelCase__: str =tmp_path / "cache" lowerCamelCase__: Any ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: int =ParquetDatasetReader(__a , cache_dir=__a ).read() _check_parquet_datasetdict(__a , __a , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCAmelCase_ ( __a , __a ) -> int: """simple docstring""" lowerCamelCase__: List[str] =ParquetDatasetWriter(__a , tmp_path / "foo.parquet" ) assert writer.write() > 0 lowerCamelCase__: List[str] =pq.ParquetFile(tmp_path / "foo.parquet" ) lowerCamelCase__: List[str] =pf.read() assert dataset.data.table == output_table def lowerCAmelCase_ ( __a , __a ) -> List[str]: """simple docstring""" lowerCamelCase__: List[str] =str(shared_datadir / "test_image_rgb.jpg" ) lowerCamelCase__: Union[str, Any] ={"image": [image_path]} lowerCamelCase__: Optional[Any] =Features({"image": Image()} ) lowerCamelCase__: Optional[int] =Dataset.from_dict(__a , features=__a ) lowerCamelCase__: Optional[int] =ParquetDatasetWriter(__a , tmp_path / "foo.parquet" ) assert writer.write() > 0 lowerCamelCase__: Dict =Dataset.from_parquet(str(tmp_path / "foo.parquet" ) ) assert dataset.features == reloaded_dataset.features lowerCamelCase__: Optional[Any] =ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=__a ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( "feature, expected" , [ (Features({"foo": Value("int32" )} ), None), (Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def lowerCAmelCase_ ( __a , __a ) -> Optional[Any]: """simple docstring""" assert get_writer_batch_size(__a ) == expected
59
0
"""simple docstring""" import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class lowerCamelCase__ ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> Optional[int]: """simple docstring""" lowercase : List[Any] = inspect.getfile(accelerate.test_utils ) lowercase : List[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] ) lowercase : Any = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_distributed_data_loop.py"""] ) lowercase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_ops.py"""] ) @require_multi_gpu def _UpperCAmelCase ( self ) -> str: """simple docstring""" print(f'''Found {torch.cuda.device_count()} devices.''' ) lowercase : Union[str, Any] = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(UpperCAmelCase_ , env=os.environ.copy() ) @require_multi_gpu def _UpperCAmelCase ( self ) -> List[Any]: """simple docstring""" print(f'''Found {torch.cuda.device_count()} devices.''' ) lowercase : Dict = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path] print(f'''Command: {cmd}''' ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(UpperCAmelCase_ , env=os.environ.copy() ) @require_multi_gpu def _UpperCAmelCase ( self ) -> Tuple: """simple docstring""" lowercase : int = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(UpperCAmelCase_ , env=os.environ.copy() ) @require_multi_gpu def _UpperCAmelCase ( self ) -> List[Any]: """simple docstring""" print(f'''Found {torch.cuda.device_count()} devices, using 2 devices only''' ) lowercase : int = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices="""0,1""" ): execute_subprocess_async(UpperCAmelCase_ , env=os.environ.copy() ) if __name__ == "__main__": lowerCAmelCase: Union[str, Any] =Accelerator() lowerCAmelCase: Optional[Any] =(accelerator.state.process_index + 2, 10) lowerCAmelCase: str =torch.randint(0, 10, shape).to(accelerator.device) lowerCAmelCase: Optional[int] ="" lowerCAmelCase: Optional[Any] =accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." lowerCAmelCase: Dict =accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." lowerCAmelCase: Dict =accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
607
import unittest from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __A = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' lowercase_ = XLMProphetNetTokenizer lowercase_ = False lowercase_ = True def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Optional[Any]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase__: Any =XLMProphetNetTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_) tokenizer.save_pretrained(self.tmpdirname) def SCREAMING_SNAKE_CASE_ (self : str) ->str: '''simple docstring''' lowerCamelCase__: List[Any] ="[PAD]" lowerCamelCase__: Tuple =0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_) , UpperCAmelCase_) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_) , UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Dict) ->int: '''simple docstring''' lowerCamelCase__: List[Any] =list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , "[PAD]") self.assertEqual(vocab_keys[1] , "[CLS]") self.assertEqual(vocab_keys[-1] , "j") self.assertEqual(len(UpperCAmelCase_) , 1_012) def SCREAMING_SNAKE_CASE_ (self : Dict) ->Union[str, Any]: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_012) def SCREAMING_SNAKE_CASE_ (self : Any) ->Optional[int]: '''simple docstring''' lowerCamelCase__: Optional[Any] =XLMProphetNetTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_) lowerCamelCase__: Tuple =tokenizer.tokenize("This is a test") self.assertListEqual(UpperCAmelCase_ , ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) lowerCamelCase__: Optional[Any] =tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( UpperCAmelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) lowerCamelCase__: Any =tokenizer.convert_tokens_to_ids(UpperCAmelCase_) self.assertListEqual( UpperCAmelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4] ] , ) lowerCamelCase__: Any =tokenizer.convert_ids_to_tokens(UpperCAmelCase_) self.assertListEqual( UpperCAmelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "[UNK]", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "[UNK]", ".", ] , ) @cached_property def SCREAMING_SNAKE_CASE_ (self : Any) ->int: '''simple docstring''' return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased") @slow def SCREAMING_SNAKE_CASE_ (self : List[str]) ->List[str]: '''simple docstring''' lowerCamelCase__: Optional[int] ="Hello World!" lowerCamelCase__: Dict =[35_389, 6_672, 49, 2] self.assertListEqual(UpperCAmelCase_ , self.big_tokenizer.encode(UpperCAmelCase_)) @slow def SCREAMING_SNAKE_CASE_ (self : int) ->Union[str, Any]: '''simple docstring''' lowerCamelCase__: Any ={"input_ids": [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase_ , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
59
0
import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 A_ = data_utils.TransfoXLTokenizer A_ = data_utils.TransfoXLCorpus A_ = data_utils A_ = data_utils def UpperCAmelCase ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase )-> List[str]: '''simple docstring''' if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(__a ,'''rb''' ) as fp: SCREAMING_SNAKE_CASE_ = pickle.load(__a ,encoding='''latin1''' ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) SCREAMING_SNAKE_CASE_ = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"] print(f'''Save vocabulary to {pytorch_vocab_dump_path}''' ) SCREAMING_SNAKE_CASE_ = corpus.vocab.__dict__ torch.save(__a ,__a ) SCREAMING_SNAKE_CASE_ = corpus.__dict__ corpus_dict_no_vocab.pop('''vocab''' ,__a ) SCREAMING_SNAKE_CASE_ = pytorch_dump_folder_path + "/" + CORPUS_NAME print(f'''Save dataset to {pytorch_dataset_dump_path}''' ) torch.save(__a ,__a ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model SCREAMING_SNAKE_CASE_ = os.path.abspath(__a ) SCREAMING_SNAKE_CASE_ = os.path.abspath(__a ) print(f'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' ) # Initialise PyTorch model if transfo_xl_config_file == "": SCREAMING_SNAKE_CASE_ = TransfoXLConfig() else: SCREAMING_SNAKE_CASE_ = TransfoXLConfig.from_json_file(__a ) print(f'''Building PyTorch model from configuration: {config}''' ) SCREAMING_SNAKE_CASE_ = TransfoXLLMHeadModel(__a ) SCREAMING_SNAKE_CASE_ = load_tf_weights_in_transfo_xl(__a ,__a ,__a ) # Save pytorch-model SCREAMING_SNAKE_CASE_ = os.path.join(__a ,__a ) SCREAMING_SNAKE_CASE_ = os.path.join(__a ,__a ) print(f'''Save PyTorch model to {os.path.abspath(__a )}''' ) torch.save(model.state_dict() ,__a ) print(f'''Save configuration file to {os.path.abspath(__a )}''' ) with open(__a ,'''w''' ,encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": A_ = argparse.ArgumentParser() parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the folder to store the PyTorch model or dataset/vocab.", ) parser.add_argument( "--tf_checkpoint_path", default="", type=str, help="An optional path to a TensorFlow checkpoint path to be converted.", ) parser.add_argument( "--transfo_xl_config_file", default="", type=str, help=( "An optional config json file corresponding to the pre-trained BERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--transfo_xl_dataset_file", default="", type=str, help="An optional dataset file to be converted in a vocabulary.", ) A_ = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
393
import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE_ (self : List[str]) ->str: '''simple docstring''' lowerCamelCase__: Union[str, Any] ="ylacombe/bark-small" lowerCamelCase__: Tuple =tempfile.mkdtemp() lowerCamelCase__: Tuple ="en_speaker_1" lowerCamelCase__: Optional[int] ="This is a test string" lowerCamelCase__: List[str] ="speaker_embeddings_path.json" lowerCamelCase__: int ="speaker_embeddings" def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , **UpperCAmelCase_ : Any) ->Tuple: '''simple docstring''' return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Union[str, Any]: '''simple docstring''' shutil.rmtree(self.tmpdirname) def SCREAMING_SNAKE_CASE_ (self : int) ->Any: '''simple docstring''' lowerCamelCase__: List[Any] =self.get_tokenizer() lowerCamelCase__: List[str] =BarkProcessor(tokenizer=UpperCAmelCase_) processor.save_pretrained(self.tmpdirname) lowerCamelCase__: Dict =BarkProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab()) @slow def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Tuple: '''simple docstring''' lowerCamelCase__: Tuple =BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) lowerCamelCase__: Dict =self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)") lowerCamelCase__: Any =BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->int: '''simple docstring''' lowerCamelCase__: Any =BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) lowerCamelCase__: List[str] =35 lowerCamelCase__: Optional[Any] =2 lowerCamelCase__: Optional[Any] =8 lowerCamelCase__: Optional[int] ={ "semantic_prompt": np.ones(UpperCAmelCase_), "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)), "fine_prompt": np.ones((nb_codebooks_total, seq_len)), } # test providing already loaded voice_preset lowerCamelCase__: Any =processor(text=self.input_string , voice_preset=UpperCAmelCase_) lowerCamelCase__: int =inputs["history_prompt"] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([])).tolist()) # test loading voice preset from npz file lowerCamelCase__: Union[str, Any] =os.path.join(self.tmpdirname , "file.npz") np.savez(UpperCAmelCase_ , **UpperCAmelCase_) lowerCamelCase__: Tuple =processor(text=self.input_string , voice_preset=UpperCAmelCase_) lowerCamelCase__: Optional[Any] =inputs["history_prompt"] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([])).tolist()) # test loading voice preset from the hub lowerCamelCase__: Any =processor(text=self.input_string , voice_preset=self.voice_preset) def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Union[str, Any]: '''simple docstring''' lowerCamelCase__: str =self.get_tokenizer() lowerCamelCase__: Dict =BarkProcessor(tokenizer=UpperCAmelCase_) lowerCamelCase__: List[Any] =processor(text=self.input_string) lowerCamelCase__: Optional[int] =tokenizer( self.input_string , padding="max_length" , max_length=256 , add_special_tokens=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist())
59
0
'''simple docstring''' from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker SCREAMING_SNAKE_CASE = 'CompVis/stable-diffusion-v1-1' SCREAMING_SNAKE_CASE = 'CompVis/stable-diffusion-v1-2' SCREAMING_SNAKE_CASE = 'CompVis/stable-diffusion-v1-3' SCREAMING_SNAKE_CASE = 'CompVis/stable-diffusion-v1-4' class UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ): """simple docstring""" def __init__( self : Tuple , UpperCAmelCase : AutoencoderKL , UpperCAmelCase : CLIPTextModel , UpperCAmelCase : CLIPTokenizer , UpperCAmelCase : UNetaDConditionModel , UpperCAmelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCAmelCase : StableDiffusionSafetyChecker , UpperCAmelCase : CLIPImageProcessor , UpperCAmelCase : bool = True , ) -> Union[str, Any]: '''simple docstring''' super()._init_() lowercase : Tuple =StableDiffusionPipeline.from_pretrained(UpperCAmelCase_ ) lowercase : List[str] =StableDiffusionPipeline.from_pretrained(UpperCAmelCase_ ) lowercase : Optional[Any] =StableDiffusionPipeline.from_pretrained(UpperCAmelCase_ ) lowercase : str =StableDiffusionPipeline( vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , requires_safety_checker=UpperCAmelCase_ , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea ) @property def A__ ( self : Optional[Any] ) -> Dict[str, Any]: '''simple docstring''' return {k: getattr(self , UpperCAmelCase_ ) for k in self.config.keys() if not k.startswith('''_''' )} def A__ ( self : Any , UpperCAmelCase : Optional[Union[str, int]] = "auto" ) -> Tuple: '''simple docstring''' if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory lowercase : Tuple =self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(UpperCAmelCase_ ) def A__ ( self : List[str] ) -> Optional[Any]: '''simple docstring''' self.enable_attention_slicing(UpperCAmelCase_ ) @torch.no_grad() def A__ ( self : Dict , UpperCAmelCase : Union[str, List[str]] , UpperCAmelCase : int = 512 , UpperCAmelCase : int = 512 , UpperCAmelCase : int = 50 , UpperCAmelCase : float = 7.5 , UpperCAmelCase : Optional[Union[str, List[str]]] = None , UpperCAmelCase : Optional[int] = 1 , UpperCAmelCase : float = 0.0 , UpperCAmelCase : Optional[torch.Generator] = None , UpperCAmelCase : Optional[torch.FloatTensor] = None , UpperCAmelCase : Optional[str] = "pil" , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase : int = 1 , **UpperCAmelCase : Optional[int] , ) -> Dict: '''simple docstring''' return self.pipea( prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , ) @torch.no_grad() def A__ ( self : int , UpperCAmelCase : Union[str, List[str]] , UpperCAmelCase : int = 512 , UpperCAmelCase : int = 512 , UpperCAmelCase : int = 50 , UpperCAmelCase : float = 7.5 , UpperCAmelCase : Optional[Union[str, List[str]]] = None , UpperCAmelCase : Optional[int] = 1 , UpperCAmelCase : float = 0.0 , UpperCAmelCase : Optional[torch.Generator] = None , UpperCAmelCase : Optional[torch.FloatTensor] = None , UpperCAmelCase : Optional[str] = "pil" , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase : int = 1 , **UpperCAmelCase : int , ) -> int: '''simple docstring''' return self.pipea( prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , ) @torch.no_grad() def A__ ( self : Optional[Any] , UpperCAmelCase : Union[str, List[str]] , UpperCAmelCase : int = 512 , UpperCAmelCase : int = 512 , UpperCAmelCase : int = 50 , UpperCAmelCase : float = 7.5 , UpperCAmelCase : Optional[Union[str, List[str]]] = None , UpperCAmelCase : Optional[int] = 1 , UpperCAmelCase : float = 0.0 , UpperCAmelCase : Optional[torch.Generator] = None , UpperCAmelCase : Optional[torch.FloatTensor] = None , UpperCAmelCase : Optional[str] = "pil" , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase : int = 1 , **UpperCAmelCase : Dict , ) -> List[str]: '''simple docstring''' return self.pipea( prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , ) @torch.no_grad() def A__ ( self : Any , UpperCAmelCase : Union[str, List[str]] , UpperCAmelCase : int = 512 , UpperCAmelCase : int = 512 , UpperCAmelCase : int = 50 , UpperCAmelCase : float = 7.5 , UpperCAmelCase : Optional[Union[str, List[str]]] = None , UpperCAmelCase : Optional[int] = 1 , UpperCAmelCase : float = 0.0 , UpperCAmelCase : Optional[torch.Generator] = None , UpperCAmelCase : Optional[torch.FloatTensor] = None , UpperCAmelCase : Optional[str] = "pil" , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase : int = 1 , **UpperCAmelCase : List[str] , ) -> List[str]: '''simple docstring''' return self.pipea( prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , ) @torch.no_grad() def A__ ( self : str , UpperCAmelCase : Union[str, List[str]] , UpperCAmelCase : int = 512 , UpperCAmelCase : int = 512 , UpperCAmelCase : int = 50 , UpperCAmelCase : float = 7.5 , UpperCAmelCase : Optional[Union[str, List[str]]] = None , UpperCAmelCase : Optional[int] = 1 , UpperCAmelCase : float = 0.0 , UpperCAmelCase : Optional[torch.Generator] = None , UpperCAmelCase : Optional[torch.FloatTensor] = None , UpperCAmelCase : Optional[str] = "pil" , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase : int = 1 , **UpperCAmelCase : List[Any] , ) -> List[str]: '''simple docstring''' lowercase : Dict ="cuda" if torch.cuda.is_available() else "cpu" self.to(UpperCAmelCase_ ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` must be divisible by 8 but are {height} and {width}.' ) # Get first result from Stable Diffusion Checkpoint v1.1 lowercase : int =self.textaimg_sda_a( prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , ) # Get first result from Stable Diffusion Checkpoint v1.2 lowercase : Any =self.textaimg_sda_a( prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , ) # Get first result from Stable Diffusion Checkpoint v1.3 lowercase : Dict =self.textaimg_sda_a( prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , ) # Get first result from Stable Diffusion Checkpoint v1.4 lowercase : Tuple =self.textaimg_sda_a( prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
94
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = ["image_processor", "tokenizer"] lowercase_ = "CLIPImageProcessor" lowercase_ = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast") def __init__(self : List[Any] , UpperCAmelCase_ : int=None , UpperCAmelCase_ : List[Any]=None , **UpperCAmelCase_ : List[str]) ->Optional[int]: '''simple docstring''' lowerCamelCase__: Union[str, Any] =None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , UpperCAmelCase_ , ) lowerCamelCase__: int =kwargs.pop("feature_extractor") lowerCamelCase__: int =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") super().__init__(UpperCAmelCase_ , UpperCAmelCase_) def __call__(self : List[Any] , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : int=None , **UpperCAmelCase_ : Any) ->Union[str, Any]: '''simple docstring''' if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be none.") if text is not None: lowerCamelCase__: List[Any] =self.tokenizer(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_) if images is not None: lowerCamelCase__: int =self.image_processor(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_) if text is not None and images is not None: lowerCamelCase__: str =image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCAmelCase_) , tensor_type=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[str] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[Any]) ->Dict: '''simple docstring''' return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Optional[int] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Any) ->Optional[Any]: '''simple docstring''' return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_) @property def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Optional[int]: '''simple docstring''' lowerCamelCase__: Optional[Any] =self.tokenizer.model_input_names lowerCamelCase__: str =self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
59
0
"""simple docstring""" import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class lowercase: '''simple docstring''' def __init__( self: str, a_: List[Any], a_: Dict=14, a_: Tuple=7, a_: Dict=True, a_: Optional[Any]=True, a_: Optional[Any]=False, a_: List[str]=True, a_: Dict=99, a_: List[str]=32, a_: Optional[int]=4, a_: Any=4, a_: List[Any]=4, a_: Any=37, a_: List[str]="gelu", a_: Tuple=0.1, a_: Optional[Any]=0.1, a_: Dict=512, a_: Any=0.02, ): '''simple docstring''' _snake_case : Union[str, Any] = parent _snake_case : Dict = batch_size _snake_case : Tuple = seq_length _snake_case : Union[str, Any] = is_training _snake_case : Tuple = use_input_mask _snake_case : List[Any] = use_token_type_ids _snake_case : Union[str, Any] = use_labels _snake_case : Dict = vocab_size _snake_case : Optional[Any] = hidden_size _snake_case : str = rotary_dim _snake_case : Dict = num_hidden_layers _snake_case : List[str] = num_attention_heads _snake_case : List[str] = intermediate_size _snake_case : Optional[Any] = hidden_act _snake_case : int = hidden_dropout_prob _snake_case : Optional[Any] = attention_probs_dropout_prob _snake_case : Any = max_position_embeddings _snake_case : Optional[int] = initializer_range _snake_case : str = None _snake_case : str = vocab_size - 1 _snake_case : List[Any] = vocab_size - 1 _snake_case : int = vocab_size - 1 def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) _snake_case : Optional[int] = None if self.use_input_mask: _snake_case : Any = random_attention_mask([self.batch_size, self.seq_length] ) _snake_case : List[Any] = GPTJConfig( vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, use_cache=UpperCAmelCase_, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, rotary_dim=self.rotary_dim, ) return (config, input_ids, input_mask) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : List[str] = self.prepare_config_and_inputs() _snake_case : Union[str, Any] = config_and_inputs _snake_case : Optional[int] = {"input_ids": input_ids, "attention_mask": attention_mask} return config, inputs_dict def UpperCamelCase_ ( self: Dict, a_: Union[str, Any], a_: Any, a_: List[str], a_: Dict ): '''simple docstring''' _snake_case : Any = 20 _snake_case : str = model_class_name(UpperCAmelCase_ ) _snake_case : Any = model.init_cache(input_ids.shape[0], UpperCAmelCase_ ) _snake_case : Union[str, Any] = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="""i4""" ) _snake_case : Dict = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) ) _snake_case : Optional[Any] = model( input_ids[:, :-1], attention_mask=UpperCAmelCase_, past_key_values=UpperCAmelCase_, position_ids=UpperCAmelCase_, ) _snake_case : Any = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="""i4""" ) _snake_case : List[Any] = model( input_ids[:, -1:], attention_mask=UpperCAmelCase_, past_key_values=outputs_cache.past_key_values, position_ids=UpperCAmelCase_, ) _snake_case : List[str] = model(UpperCAmelCase_ ) _snake_case : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3, msg=f"Max diff is {diff}" ) def UpperCamelCase_ ( self: Tuple, a_: List[str], a_: str, a_: int, a_: Optional[Any] ): '''simple docstring''' _snake_case : Any = 20 _snake_case : int = model_class_name(UpperCAmelCase_ ) _snake_case : int = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )], axis=-1, ) _snake_case : int = model.init_cache(input_ids.shape[0], UpperCAmelCase_ ) _snake_case : Optional[Any] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) ) _snake_case : List[str] = model( input_ids[:, :-1], attention_mask=UpperCAmelCase_, past_key_values=UpperCAmelCase_, position_ids=UpperCAmelCase_, ) _snake_case : Any = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="""i4""" ) _snake_case : int = model( input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=UpperCAmelCase_, position_ids=UpperCAmelCase_, ) _snake_case : List[str] = model(UpperCAmelCase_, attention_mask=UpperCAmelCase_ ) _snake_case : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3, msg=f"Max diff is {diff}" ) @require_flax class lowercase( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' lowercase__ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () lowercase__ = (FlaxGPTJForCausalLM,) if is_flax_available() else () def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : Tuple = FlaxGPTJModelTester(self ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' for model_class_name in self.all_model_classes: _snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' for model_class_name in self.all_model_classes: _snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ ) @tooslow def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : Union[str, Any] = GPTaTokenizer.from_pretrained("""gpt2""", pad_token="""<|endoftext|>""", padding_side="""left""" ) _snake_case : Any = tokenizer(["""Hello this is a long string""", """Hey"""], return_tensors="""np""", padding=UpperCAmelCase_, truncation=UpperCAmelCase_ ) _snake_case : str = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" ) _snake_case : Union[str, Any] = False _snake_case : List[str] = model.config.eos_token_id _snake_case : Optional[Any] = jax.jit(model.generate ) _snake_case : List[str] = jit_generate( inputs["""input_ids"""], attention_mask=inputs["""attention_mask"""], pad_token_id=tokenizer.pad_token_id ).sequences _snake_case : Tuple = tokenizer.batch_decode(UpperCAmelCase_, skip_special_tokens=UpperCAmelCase_ ) _snake_case : Dict = [ "Hello this is a long string of text.\n\nI'm trying to get the text of the", "Hey, I'm a little late to the party. I'm going to", ] self.assertListEqual(UpperCAmelCase_, UpperCAmelCase_ ) @is_pt_flax_cross_test def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs _snake_case : List[Any] = self._prepare_for_class(UpperCAmelCase_, UpperCAmelCase_ ) _snake_case : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class _snake_case : Optional[Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning _snake_case : List[Any] = getattr(UpperCAmelCase_, UpperCAmelCase_ ) _snake_case : List[Any] = pt_inputs["input_ids"].shape _snake_case : Optional[Any] = np.random.randint(0, seq_length - 1, size=(batch_size,) ) for batch_idx, start_index in enumerate(UpperCAmelCase_ ): _snake_case : str = 0 _snake_case : Optional[Any] = 1 _snake_case : int = 0 _snake_case : Union[str, Any] = 1 _snake_case : Dict = pt_model_class(UpperCAmelCase_ ).eval() _snake_case : Any = model_class(UpperCAmelCase_, dtype=jnp.floataa ) _snake_case : str = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), UpperCAmelCase_ ) _snake_case : List[str] = fx_state with torch.no_grad(): _snake_case : Optional[int] = pt_model(**UpperCAmelCase_ ).to_tuple() _snake_case : Dict = fx_model(**UpperCAmelCase_ ).to_tuple() self.assertEqual(len(UpperCAmelCase_ ), len(UpperCAmelCase_ ), """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(UpperCAmelCase_, UpperCAmelCase_ ): self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(UpperCAmelCase_ ) _snake_case : Optional[int] = model_class.from_pretrained(UpperCAmelCase_, from_pt=UpperCAmelCase_ ) _snake_case : str = fx_model_loaded(**UpperCAmelCase_ ).to_tuple() self.assertEqual( len(UpperCAmelCase_ ), len(UpperCAmelCase_ ), """Output lengths differ between Flax and PyTorch""" ) for fx_output_loaded, pt_output in zip(UpperCAmelCase_, UpperCAmelCase_ ): self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4E-2 ) @is_pt_flax_cross_test def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs _snake_case : Union[str, Any] = self._prepare_for_class(UpperCAmelCase_, UpperCAmelCase_ ) _snake_case : Dict = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class _snake_case : Optional[int] = model_class.__name__[4:] # Skip the "Flax" at the beginning _snake_case : List[Any] = getattr(UpperCAmelCase_, UpperCAmelCase_ ) _snake_case : Optional[Any] = pt_model_class(UpperCAmelCase_ ).eval() _snake_case : str = model_class(UpperCAmelCase_, dtype=jnp.floataa ) _snake_case : Union[str, Any] = load_flax_weights_in_pytorch_model(UpperCAmelCase_, fx_model.params ) _snake_case : Optional[Any] = pt_inputs["input_ids"].shape _snake_case : List[Any] = np.random.randint(0, seq_length - 1, size=(batch_size,) ) for batch_idx, start_index in enumerate(UpperCAmelCase_ ): _snake_case : str = 0 _snake_case : Union[str, Any] = 1 _snake_case : Dict = 0 _snake_case : int = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): _snake_case : List[Any] = pt_model(**UpperCAmelCase_ ).to_tuple() _snake_case : Optional[int] = fx_model(**UpperCAmelCase_ ).to_tuple() self.assertEqual(len(UpperCAmelCase_ ), len(UpperCAmelCase_ ), """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(UpperCAmelCase_, UpperCAmelCase_ ): self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(UpperCAmelCase_ ) _snake_case : str = pt_model_class.from_pretrained(UpperCAmelCase_, from_flax=UpperCAmelCase_ ) with torch.no_grad(): _snake_case : Optional[int] = pt_model_loaded(**UpperCAmelCase_ ).to_tuple() self.assertEqual( len(UpperCAmelCase_ ), len(UpperCAmelCase_ ), """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(UpperCAmelCase_, UpperCAmelCase_ ): self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4E-2 ) @tooslow def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' for model_class_name in self.all_model_classes: _snake_case : List[Any] = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" ) _snake_case : List[str] = model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCAmelCase_ )
609
from datetime import datetime import matplotlib.pyplot as plt import torch def lowerCAmelCase_ ( __a ) -> Any: """simple docstring""" for param in module.parameters(): lowerCamelCase__: Tuple =False def lowerCAmelCase_ ( ) -> Optional[int]: """simple docstring""" lowerCamelCase__: List[str] ="cuda" if torch.cuda.is_available() else "cpu" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): lowerCamelCase__: str ="mps" if device == "mps": print( "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch" " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues" " with generations." ) return device def lowerCAmelCase_ ( __a ) -> List[str]: """simple docstring""" lowerCamelCase__: Union[str, Any] =plt.imshow(__a ) fig.axes.get_xaxis().set_visible(__a ) fig.axes.get_yaxis().set_visible(__a ) plt.show() def lowerCAmelCase_ ( ) -> Optional[Any]: """simple docstring""" lowerCamelCase__: List[str] =datetime.now() lowerCamelCase__: str =current_time.strftime("%H:%M:%S" ) return timestamp
59
0
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __lowerCamelCase : Any = logging.get_logger(__name__) __lowerCamelCase : Dict = { '''facebook/deit-base-distilled-patch16-224''': ( '''https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json''' ), # See all DeiT models at https://huggingface.co/models?filter=deit } class a__ ( __SCREAMING_SNAKE_CASE ): A = 'deit' def __init__( self : str,_A : str=768,_A : Dict=12,_A : List[str]=12,_A : List[str]=3072,_A : int="gelu",_A : Optional[Any]=0.0,_A : Dict=0.0,_A : List[Any]=0.02,_A : int=1E-12,_A : Union[str, Any]=224,_A : Dict=16,_A : int=3,_A : Tuple=True,_A : List[Any]=16,**_A : List[Any],): """simple docstring""" super().__init__(**UpperCAmelCase_ ) SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_size SCREAMING_SNAKE_CASE_ : Any = num_hidden_layers SCREAMING_SNAKE_CASE_ : str = num_attention_heads SCREAMING_SNAKE_CASE_ : Tuple = intermediate_size SCREAMING_SNAKE_CASE_ : Tuple = hidden_act SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_dropout_prob SCREAMING_SNAKE_CASE_ : Dict = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ : Optional[Any] = initializer_range SCREAMING_SNAKE_CASE_ : Tuple = layer_norm_eps SCREAMING_SNAKE_CASE_ : List[str] = image_size SCREAMING_SNAKE_CASE_ : Union[str, Any] = patch_size SCREAMING_SNAKE_CASE_ : Optional[int] = num_channels SCREAMING_SNAKE_CASE_ : Optional[Any] = qkv_bias SCREAMING_SNAKE_CASE_ : Tuple = encoder_stride class a__ ( __SCREAMING_SNAKE_CASE ): A = version.parse('1.11' ) @property def __UpperCamelCase ( self : int ): """simple docstring""" return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def __UpperCamelCase ( self : Union[str, Any] ): """simple docstring""" return 1E-4
216
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __A = { "configuration_pix2struct": [ "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Pix2StructConfig", "Pix2StructTextConfig", "Pix2StructVisionConfig", ], "processing_pix2struct": ["Pix2StructProcessor"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["Pix2StructImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST", "Pix2StructPreTrainedModel", "Pix2StructForConditionalGeneration", "Pix2StructVisionModel", "Pix2StructTextModel", ] if TYPE_CHECKING: from .configuration_pixastruct import ( PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP, PixaStructConfig, PixaStructTextConfig, PixaStructVisionConfig, ) from .processing_pixastruct import PixaStructProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_pixastruct import PixaStructImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pixastruct import ( PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST, PixaStructForConditionalGeneration, PixaStructPreTrainedModel, PixaStructTextModel, PixaStructVisionModel, ) else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
59
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) A = { """configuration_convnext""": ["""CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvNextConfig""", """ConvNextOnnxConfig"""] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A = ["""ConvNextFeatureExtractor"""] A = ["""ConvNextImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A = [ """CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST""", """ConvNextForImageClassification""", """ConvNextModel""", """ConvNextPreTrainedModel""", """ConvNextBackbone""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A = [ """TFConvNextForImageClassification""", """TFConvNextModel""", """TFConvNextPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_convnext import ConvNextFeatureExtractor from .image_processing_convnext import ConvNextImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convnext import ( CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvNextBackbone, ConvNextForImageClassification, ConvNextModel, ConvNextPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel else: import sys A = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
77
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer __A = logging.get_logger(__name__) __A = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} __A = { "vocab_file": { "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt", "distilbert-base-uncased-distilled-squad": ( "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt" ), "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt", "distilbert-base-cased-distilled-squad": ( "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt" ), "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt", "distilbert-base-multilingual-cased": ( "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt" ), }, "tokenizer_file": { "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json", "distilbert-base-uncased-distilled-squad": ( "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json" ), "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json", "distilbert-base-cased-distilled-squad": ( "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json" ), "distilbert-base-german-cased": ( "https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json" ), "distilbert-base-multilingual-cased": ( "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json" ), }, } __A = { "distilbert-base-uncased": 512, "distilbert-base-uncased-distilled-squad": 512, "distilbert-base-cased": 512, "distilbert-base-cased-distilled-squad": 512, "distilbert-base-german-cased": 512, "distilbert-base-multilingual-cased": 512, } __A = { "distilbert-base-uncased": {"do_lower_case": True}, "distilbert-base-uncased-distilled-squad": {"do_lower_case": True}, "distilbert-base-cased": {"do_lower_case": False}, "distilbert-base-cased-distilled-squad": {"do_lower_case": False}, "distilbert-base-german-cased": {"do_lower_case": False}, "distilbert-base-multilingual-cased": {"do_lower_case": False}, } class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = PRETRAINED_INIT_CONFIGURATION lowercase_ = ["input_ids", "attention_mask"] lowercase_ = DistilBertTokenizer def __init__(self : Tuple , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[Any]="[UNK]" , UpperCAmelCase_ : Dict="[SEP]" , UpperCAmelCase_ : Dict="[PAD]" , UpperCAmelCase_ : Optional[int]="[CLS]" , UpperCAmelCase_ : str="[MASK]" , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[str]=None , **UpperCAmelCase_ : List[str] , ) ->str: '''simple docstring''' super().__init__( UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , tokenize_chinese_chars=UpperCAmelCase_ , strip_accents=UpperCAmelCase_ , **UpperCAmelCase_ , ) lowerCamelCase__: Union[str, Any] =json.loads(self.backend_tokenizer.normalizer.__getstate__()) if ( normalizer_state.get("lowercase" , UpperCAmelCase_) != do_lower_case or normalizer_state.get("strip_accents" , UpperCAmelCase_) != strip_accents or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase_) != tokenize_chinese_chars ): lowerCamelCase__: List[str] =getattr(UpperCAmelCase_ , normalizer_state.pop("type")) lowerCamelCase__: Optional[int] =do_lower_case lowerCamelCase__: int =strip_accents lowerCamelCase__: Any =tokenize_chinese_chars lowerCamelCase__: Any =normalizer_class(**UpperCAmelCase_) lowerCamelCase__: str =do_lower_case def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any]=None) ->Dict: '''simple docstring''' lowerCamelCase__: str =[self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None) ->List[int]: '''simple docstring''' lowerCamelCase__: str =[self.sep_token_id] lowerCamelCase__: str =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None) ->Tuple[str]: '''simple docstring''' lowerCamelCase__: str =self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_) return tuple(UpperCAmelCase_)
59
0
"""simple docstring""" from typing import List, Optional, Union import numpy as np from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ....feature_extraction_sequence_utils import SequenceFeatureExtractor from ....feature_extraction_utils import BatchFeature from ....file_utils import PaddingStrategy, TensorType from ....utils import logging lowercase__ = logging.get_logger(__name__) class lowerCAmelCase__ ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowerCamelCase__ = ["""input_features""", """attention_mask"""] def __init__( self , lowercase=80 , lowercase=16000 , lowercase=0.0 , lowercase=10 , lowercase=25 , lowercase="hamming_window" , lowercase=32768.0 , lowercase=0.97 , lowercase=1.0 , lowercase=True , lowercase=True , lowercase=False , **lowercase , ): super().__init__(feature_size=UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , padding_value=UpperCAmelCase_ , **UpperCAmelCase_ ) _lowerCamelCase : Tuple = feature_size _lowerCamelCase : str = sampling_rate _lowerCamelCase : Tuple = padding_value _lowerCamelCase : Optional[Any] = hop_length _lowerCamelCase : List[str] = win_length _lowerCamelCase : Dict = frame_signal_scale _lowerCamelCase : Optional[int] = preemphasis_coeff _lowerCamelCase : int = mel_floor _lowerCamelCase : Optional[int] = normalize_means _lowerCamelCase : Optional[int] = normalize_vars _lowerCamelCase : List[Any] = win_function _lowerCamelCase : int = return_attention_mask _lowerCamelCase : Union[str, Any] = win_length * sampling_rate // 1000 _lowerCamelCase : List[Any] = hop_length * sampling_rate // 1000 _lowerCamelCase : Tuple = optimal_fft_length(self.sample_size ) _lowerCamelCase : Optional[Any] = (self.n_fft // 2) + 1 def A_ ( self , lowercase ): if self.win_function == "hamming_window": _lowerCamelCase : Tuple = window_function(window_length=self.sample_size , name=self.win_function , periodic=UpperCAmelCase_ ) else: _lowerCamelCase : int = window_function(window_length=self.sample_size , name=self.win_function ) _lowerCamelCase : int = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , ) _lowerCamelCase : Dict = spectrogram( one_waveform * self.frame_signal_scale , window=UpperCAmelCase_ , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=UpperCAmelCase_ , preemphasis=self.preemphasis_coeff , mel_filters=UpperCAmelCase_ , mel_floor=self.mel_floor , log_mel='log' , ) return msfc_features.T def A_ ( self , lowercase , lowercase , lowercase ): if self.normalize_means: _lowerCamelCase : str = x[:input_length].mean(axis=0 ) _lowerCamelCase : Optional[Any] = np.subtract(UpperCAmelCase_ , UpperCAmelCase_ ) if self.normalize_vars: _lowerCamelCase : Tuple = x[:input_length].std(axis=0 ) _lowerCamelCase : Union[str, Any] = np.divide(UpperCAmelCase_ , UpperCAmelCase_ ) if input_length < x.shape[0]: _lowerCamelCase : List[str] = padding_value # make sure array is in float32 _lowerCamelCase : Union[str, Any] = x.astype(np.floataa ) return x def A_ ( self , lowercase , lowercase = None ): _lowerCamelCase : List[str] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [self._normalize_one(UpperCAmelCase_ , UpperCAmelCase_ , self.padding_value ) for x, n in zip(UpperCAmelCase_ , UpperCAmelCase_ )] def __call__( self , lowercase , lowercase = False , lowercase = None , lowercase = False , lowercase = None , lowercase = None , lowercase = None , lowercase = None , **lowercase , ): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' F''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the ``sampling_rate`` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) _lowerCamelCase : Optional[Any] = isinstance(UpperCAmelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) _lowerCamelCase : Optional[Any] = is_batched_numpy or ( isinstance(UpperCAmelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _lowerCamelCase : Tuple = [np.asarray(UpperCAmelCase_ , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(UpperCAmelCase_ , np.ndarray ): _lowerCamelCase : str = np.asarray(UpperCAmelCase_ , dtype=np.floataa ) elif isinstance(UpperCAmelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): _lowerCamelCase : Optional[Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: _lowerCamelCase : Optional[int] = [raw_speech] # extract fbank features _lowerCamelCase : List[Any] = [self._extract_mfsc_features(UpperCAmelCase_ ) for one_waveform in raw_speech] # convert into correct format for padding _lowerCamelCase : Dict = BatchFeature({'input_features': features} ) _lowerCamelCase : Any = self.pad( UpperCAmelCase_ , padding=UpperCAmelCase_ , max_length=UpperCAmelCase_ , truncation=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , **UpperCAmelCase_ , ) # make sure list is in array format _lowerCamelCase : Optional[Any] = padded_inputs.get('input_features' ) if isinstance(input_features[0] , UpperCAmelCase_ ): _lowerCamelCase : Any = [np.asarray(UpperCAmelCase_ , dtype=np.floataa ) for feature in input_features] _lowerCamelCase : Dict = padded_inputs.get('attention_mask' ) if attention_mask is not None: _lowerCamelCase : str = [np.asarray(UpperCAmelCase_ , dtype=np.intaa ) for array in attention_mask] if self.normalize_means or self.normalize_vars: _lowerCamelCase : str = ( np.array(UpperCAmelCase_ , dtype=np.intaa ) if self._get_padding_strategies(UpperCAmelCase_ , max_length=UpperCAmelCase_ ) is not PaddingStrategy.DO_NOT_PAD and padding else None ) _lowerCamelCase : Optional[Any] = self.normalize( padded_inputs['input_features'] , attention_mask=UpperCAmelCase_ ) if return_tensors is not None: _lowerCamelCase : int = padded_inputs.convert_to_tensors(UpperCAmelCase_ ) return padded_inputs
630
import operator as op def lowerCAmelCase_ ( __a ) -> Tuple: """simple docstring""" lowerCamelCase__: Optional[Any] =[] lowerCamelCase__: Tuple =lambda __a , __a : int(x / y ) # noqa: E731 integer division operation lowerCamelCase__: Tuple ={ "^": op.pow, "*": op.mul, "/": div, "+": op.add, "-": op.sub, } # operators & their respective operation # print table header print("Symbol".center(8 ) , "Action".center(12 ) , "Stack" , sep=" | " ) print("-" * (30 + len(__a )) ) for x in post_fix: if x.isdigit(): # if x in digit stack.append(__a ) # append x to stack # output in tabular format print(x.rjust(8 ) , ("push(" + x + ")").ljust(12 ) , ",".join(__a ) , sep=" | " ) else: lowerCamelCase__: List[Any] =stack.pop() # pop stack # output in tabular format print("".rjust(8 ) , ("pop(" + b + ")").ljust(12 ) , ",".join(__a ) , sep=" | " ) lowerCamelCase__: Optional[Any] =stack.pop() # pop stack # output in tabular format print("".rjust(8 ) , ("pop(" + a + ")").ljust(12 ) , ",".join(__a ) , sep=" | " ) stack.append( str(opr[x](int(__a ) , int(__a ) ) ) ) # evaluate the 2 values popped from stack & push result to stack # output in tabular format print( x.rjust(8 ) , ("push(" + a + x + b + ")").ljust(12 ) , ",".join(__a ) , sep=" | " , ) return int(stack[0] ) if __name__ == "__main__": __A = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ") print("\n\tResult = ", solve(Postfix))
59
0
import argparse import shlex import runhouse as rh if __name__ == "__main__": # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access # setup instructions, if using on-demand hardware # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster # Throw an error if user passes both BYO and on-demand cluster args # Otherwise, use default values lowerCamelCase_ = argparse.ArgumentParser() parser.add_argument("""--user""", type=str, default="""ubuntu""") parser.add_argument("""--host""", type=str, default="""localhost""") parser.add_argument("""--key_path""", type=str, default=None) parser.add_argument("""--instance""", type=str, default="""V100:1""") parser.add_argument("""--provider""", type=str, default="""cheapest""") parser.add_argument("""--use_spot""", type=bool, default=False) parser.add_argument("""--example""", type=str, default="""pytorch/text-generation/run_generation.py""") lowerCamelCase_ , lowerCamelCase_ = parser.parse_known_args() if args.host != "localhost": if args.instance != "V100:1" or args.provider != "cheapest": raise ValueError("""Cannot specify both BYO and on-demand cluster args""") lowerCamelCase_ = rh.cluster( name="""rh-cluster""", ips=[args.host], ssh_creds={"""ssh_user""": args.user, """ssh_private_key""": args.key_path} ) else: lowerCamelCase_ = rh.cluster( name="""rh-cluster""", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot ) lowerCamelCase_ = args.example.rsplit("""/""", 1)[0] # Set up remote environment cluster.install_packages(["""pip:./"""]) # Installs transformers from local source # Note transformers is copied into the home directory on the remote machine, so we can install from there cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt''']) cluster.run(["""pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"""]) # Run example. You can bypass the CLI wrapper and paste your own code here. cluster.run([f'''python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}''']) # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI): # from my_script... import train # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard'] # launch_train_gpu = rh.function(fn=train, # system=gpu, # reqs=reqs, # name='train_bert_glue') # # We can pass in arguments just like we would to a function: # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16 # stream_logs=True)
318
from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING __A = logging.get_logger(__name__) @add_end_docstrings(__SCREAMING_SNAKE_CASE ) class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__(self : List[Any] , **UpperCAmelCase_ : Any) ->Any: '''simple docstring''' super().__init__(**UpperCAmelCase_) requires_backends(self , "vision") requires_backends(self , "torch") if self.framework != "pt": raise ValueError(F"""The {self.__class__} is only available in PyTorch.""") self.check_model_type(UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Tuple , **UpperCAmelCase_ : List[Any]) ->Tuple: '''simple docstring''' lowerCamelCase__: Optional[int] ={} lowerCamelCase__: Tuple ={} lowerCamelCase__: str ={} # preprocess args if "points_per_batch" in kwargs: lowerCamelCase__: Optional[Any] =kwargs["points_per_batch"] if "points_per_crop" in kwargs: lowerCamelCase__: int =kwargs["points_per_crop"] if "crops_n_layers" in kwargs: lowerCamelCase__: Any =kwargs["crops_n_layers"] if "crop_overlap_ratio" in kwargs: lowerCamelCase__: Tuple =kwargs["crop_overlap_ratio"] if "crop_n_points_downscale_factor" in kwargs: lowerCamelCase__: List[Any] =kwargs["crop_n_points_downscale_factor"] # postprocess args if "pred_iou_thresh" in kwargs: lowerCamelCase__: List[str] =kwargs["pred_iou_thresh"] if "stability_score_offset" in kwargs: lowerCamelCase__: int =kwargs["stability_score_offset"] if "mask_threshold" in kwargs: lowerCamelCase__: Optional[int] =kwargs["mask_threshold"] if "stability_score_thresh" in kwargs: lowerCamelCase__: str =kwargs["stability_score_thresh"] if "crops_nms_thresh" in kwargs: lowerCamelCase__: Any =kwargs["crops_nms_thresh"] if "output_rle_mask" in kwargs: lowerCamelCase__: List[Any] =kwargs["output_rle_mask"] if "output_bboxes_mask" in kwargs: lowerCamelCase__: List[str] =kwargs["output_bboxes_mask"] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__(self : int , UpperCAmelCase_ : Dict , *UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Optional[Any]=None , **UpperCAmelCase_ : Dict) ->Optional[Any]: '''simple docstring''' return super().__call__(UpperCAmelCase_ , *UpperCAmelCase_ , num_workers=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , **UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any]=64 , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : float = 512 / 1_500 , UpperCAmelCase_ : Optional[int] = 32 , UpperCAmelCase_ : Optional[int] = 1 , ) ->Dict: '''simple docstring''' lowerCamelCase__: Dict =load_image(UpperCAmelCase_) lowerCamelCase__: List[str] =self.image_processor.size["longest_edge"] lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Union[str, Any] =self.image_processor.generate_crop_boxes( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: str =self.image_processor(images=UpperCAmelCase_ , return_tensors="pt") with self.device_placement(): if self.framework == "pt": lowerCamelCase__: str =self.get_inference_context() with inference_context(): lowerCamelCase__: Union[str, Any] =self._ensure_tensor_on_device(UpperCAmelCase_ , device=self.device) lowerCamelCase__: Optional[Any] =self.model.get_image_embeddings(model_inputs.pop("pixel_values")) lowerCamelCase__: str =image_embeddings lowerCamelCase__: int =grid_points.shape[1] lowerCamelCase__: int =points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. " "To return all points at once, set points_per_batch to None") for i in range(0 , UpperCAmelCase_ , UpperCAmelCase_): lowerCamelCase__: int =grid_points[:, i : i + points_per_batch, :, :] lowerCamelCase__: Optional[Any] =input_labels[:, i : i + points_per_batch] lowerCamelCase__: Dict =i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict=0.88 , UpperCAmelCase_ : Optional[Any]=0.95 , UpperCAmelCase_ : Tuple=0 , UpperCAmelCase_ : Any=1 , ) ->Optional[Any]: '''simple docstring''' lowerCamelCase__: Any =model_inputs.pop("input_boxes") lowerCamelCase__: Dict =model_inputs.pop("is_last") lowerCamelCase__: int =model_inputs.pop("original_sizes").tolist() lowerCamelCase__: Union[str, Any] =model_inputs.pop("reshaped_input_sizes").tolist() lowerCamelCase__: Union[str, Any] =self.model(**UpperCAmelCase_) # post processing happens here in order to avoid CPU GPU copies of ALL the masks lowerCamelCase__: Optional[int] =model_outputs["pred_masks"] lowerCamelCase__: Union[str, Any] =self.image_processor.post_process_masks( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , binarize=UpperCAmelCase_) lowerCamelCase__: Optional[Any] =model_outputs["iou_scores"] lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Any =self.image_processor.filter_masks( masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Optional[int]=0.7 , ) ->Tuple: '''simple docstring''' lowerCamelCase__: Any =[] lowerCamelCase__: Optional[int] =[] lowerCamelCase__: List[str] =[] for model_output in model_outputs: all_scores.append(model_output.pop("iou_scores")) all_masks.extend(model_output.pop("masks")) all_boxes.append(model_output.pop("boxes")) lowerCamelCase__: str =torch.cat(UpperCAmelCase_) lowerCamelCase__: List[str] =torch.cat(UpperCAmelCase_) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Dict =self.image_processor.post_process_for_mask_generation( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: List[str] =defaultdict(UpperCAmelCase_) for output in model_outputs: for k, v in output.items(): extra[k].append(UpperCAmelCase_) lowerCamelCase__: Any ={} if output_rle_mask: lowerCamelCase__: Union[str, Any] =rle_mask if output_bboxes_mask: lowerCamelCase__: int =bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
59
0
"""simple docstring""" import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup __A : Any = logging.get_logger(__name__) class lowerCamelCase ( __SCREAMING_SNAKE_CASE ): def __init__( self , **SCREAMING_SNAKE_CASE_ ): requires_backends(self , ["""bs4"""] ) super().__init__(**UpperCAmelCase_ ) def a_ ( self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase : Any = [] UpperCamelCase : List[str] = [] UpperCamelCase : Optional[Any] = element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag UpperCamelCase : Any = parent.find_all(child.name , recursive=UpperCAmelCase_ ) xpath_tags.append(child.name ) xpath_subscripts.append( 0 if 1 == len(UpperCAmelCase_ ) else next(i for i, s in enumerate(UpperCAmelCase_ , 1 ) if s is child ) ) UpperCamelCase : Union[str, Any] = parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def a_ ( self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase : Dict = BeautifulSoup(UpperCAmelCase_ , """html.parser""" ) UpperCamelCase : Tuple = [] UpperCamelCase : Tuple = [] UpperCamelCase : Any = [] for element in html_code.descendants: if type(UpperCAmelCase_ ) == bsa.element.NavigableString: if type(element.parent ) != bsa.element.Tag: continue UpperCamelCase : Tuple = html.unescape(UpperCAmelCase_ ).strip() if not text_in_this_tag: continue all_doc_strings.append(UpperCAmelCase_ ) UpperCamelCase : Optional[int] = self.xpath_soup(UpperCAmelCase_ ) stringaxtag_seq.append(UpperCAmelCase_ ) stringaxsubs_seq.append(UpperCAmelCase_ ) if len(UpperCAmelCase_ ) != len(UpperCAmelCase_ ): raise ValueError("""Number of doc strings and xtags does not correspond""" ) if len(UpperCAmelCase_ ) != len(UpperCAmelCase_ ): raise ValueError("""Number of doc strings and xsubs does not correspond""" ) return all_doc_strings, stringaxtag_seq, stringaxsubs_seq def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase : List[Any] = "" for tagname, subs in zip(UpperCAmelCase_ , UpperCAmelCase_ ): xpath += f'/{tagname}' if subs != 0: xpath += f'[{subs}]' return xpath def __call__( self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase : List[str] = False # Check that strings has a valid type if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): UpperCamelCase : int = True elif isinstance(UpperCAmelCase_ , (list, tuple) ): if len(UpperCAmelCase_ ) == 0 or isinstance(html_strings[0] , UpperCAmelCase_ ): UpperCamelCase : str = True if not valid_strings: raise ValueError( """HTML strings must of type `str`, `List[str]` (batch of examples), """ f'but is of type {type(UpperCAmelCase_ )}.' ) UpperCamelCase : Tuple = bool(isinstance(UpperCAmelCase_ , (list, tuple) ) and (isinstance(html_strings[0] , UpperCAmelCase_ )) ) if not is_batched: UpperCamelCase : List[Any] = [html_strings] # Get nodes + xpaths UpperCamelCase : List[str] = [] UpperCamelCase : List[str] = [] for html_string in html_strings: UpperCamelCase : Any = self.get_three_from_single(UpperCAmelCase_ ) nodes.append(UpperCAmelCase_ ) UpperCamelCase : Any = [] for node, tag_list, sub_list in zip(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): UpperCamelCase : List[Any] = self.construct_xpath(UpperCAmelCase_ , UpperCAmelCase_ ) xpath_strings.append(UpperCAmelCase_ ) xpaths.append(UpperCAmelCase_ ) # return as Dict UpperCamelCase : int = {"nodes": nodes, "xpaths": xpaths} UpperCamelCase : int = BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_ ) return encoded_inputs
499
from transformers import BertTokenizerFast from .custom_tokenization import CustomTokenizer class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = CustomTokenizer pass
59
0
"""simple docstring""" from typing import Optional from urllib.parse import quote import huggingface_hub as hfh from packaging import version def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] = None ): if version.parse(hfh.__version__ ).release < version.parse('0.11.0' ).release: # old versions of hfh don't url-encode the file path lowerCAmelCase = quote(__a ) return hfh.hf_hub_url(__a , __a , repo_type='dataset' , revision=__a )
4
import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE_ (self : Dict) ->Optional[int]: '''simple docstring''' lowerCamelCase__: List[Any] =inspect.getfile(accelerate.test_utils) lowerCamelCase__: List[Any] =os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"]) lowerCamelCase__: Any =os.path.sep.join( mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]) lowerCamelCase__: Tuple =os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"]) @require_multi_gpu def SCREAMING_SNAKE_CASE_ (self : str) ->str: '''simple docstring''' print(F"""Found {torch.cuda.device_count()} devices.""") lowerCamelCase__: Union[str, Any] =["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path] with patch_environment(omp_num_threads=1): execute_subprocess_async(UpperCAmelCase_ , env=os.environ.copy()) @require_multi_gpu def SCREAMING_SNAKE_CASE_ (self : List[str]) ->List[Any]: '''simple docstring''' print(F"""Found {torch.cuda.device_count()} devices.""") lowerCamelCase__: Dict =["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path] print(F"""Command: {cmd}""") with patch_environment(omp_num_threads=1): execute_subprocess_async(UpperCAmelCase_ , env=os.environ.copy()) @require_multi_gpu def SCREAMING_SNAKE_CASE_ (self : Dict) ->Tuple: '''simple docstring''' lowerCamelCase__: int =["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__)] with patch_environment(omp_num_threads=1): execute_subprocess_async(UpperCAmelCase_ , env=os.environ.copy()) @require_multi_gpu def SCREAMING_SNAKE_CASE_ (self : str) ->List[Any]: '''simple docstring''' print(F"""Found {torch.cuda.device_count()} devices, using 2 devices only""") lowerCamelCase__: int =["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1"): execute_subprocess_async(UpperCAmelCase_ , env=os.environ.copy()) if __name__ == "__main__": __A = Accelerator() __A = (accelerator.state.process_index + 2, 10) __A = torch.randint(0, 10, shape).to(accelerator.device) __A = "" __A = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." __A = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." __A = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
59
0
import warnings from functools import wraps from typing import Callable def lowerCamelCase_ ( lowerCamelCase__ ): @wraps(__a ) def _inner_fn(*lowerCamelCase__ , **lowerCamelCase__ ): warnings.warn( (F'\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.') , __a , ) return fn(*__a , **__a ) return _inner_fn
463
from typing import List, Optional, Tuple, Union import PIL import torch from torchvision import transforms from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput from diffusers.schedulers import DDIMScheduler from diffusers.utils import randn_tensor __A = transforms.Compose( [ transforms.Resize((256, 256)), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def lowerCAmelCase_ ( __a ) -> str: """simple docstring""" if isinstance(__a , torch.Tensor ): return image elif isinstance(__a , PIL.Image.Image ): lowerCamelCase__: Any =[image] lowerCamelCase__: Optional[Any] =[trans(img.convert("RGB" ) ) for img in image] lowerCamelCase__: Dict =torch.stack(__a ) return image class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__(self : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple) ->int: '''simple docstring''' super().__init__() # make sure scheduler can always be converted to DDIM lowerCamelCase__: Tuple =DDIMScheduler.from_config(scheduler.config) self.register_modules(unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Union[str, Any]) ->Dict: '''simple docstring''' if strength < 0 or strength > 1: raise ValueError(F"""The value of strength should in [0.0, 1.0] but is {strength}""") def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple) ->Tuple: '''simple docstring''' lowerCamelCase__: int =min(int(num_inference_steps * strength) , UpperCAmelCase_) lowerCamelCase__: str =max(num_inference_steps - init_timestep , 0) lowerCamelCase__: int =self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int]=None) ->Optional[int]: '''simple docstring''' if not isinstance(UpperCAmelCase_ , (torch.Tensor, PIL.Image.Image, list)): raise ValueError( F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(UpperCAmelCase_)}""") lowerCamelCase__: Optional[int] =image.to(device=UpperCAmelCase_ , dtype=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) and len(UpperCAmelCase_) != batch_size: raise ValueError( F"""You have passed a list of generators of length {len(UpperCAmelCase_)}, but requested an effective batch""" F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""") lowerCamelCase__: Dict =init_latents.shape lowerCamelCase__: int =randn_tensor(UpperCAmelCase_ , generator=UpperCAmelCase_ , device=UpperCAmelCase_ , dtype=UpperCAmelCase_) # get latents print("add noise to latents at timestep" , UpperCAmelCase_) lowerCamelCase__: Union[str, Any] =self.scheduler.add_noise(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: int =init_latents return latents @torch.no_grad() def __call__(self : Tuple , UpperCAmelCase_ : Union[torch.FloatTensor, PIL.Image.Image] = None , UpperCAmelCase_ : float = 0.8 , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : int = 50 , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , ) ->Union[ImagePipelineOutput, Tuple]: '''simple docstring''' self.check_inputs(UpperCAmelCase_) # 2. Preprocess image lowerCamelCase__: Dict =preprocess(UpperCAmelCase_) # 3. set timesteps self.scheduler.set_timesteps(UpperCAmelCase_ , device=self.device) lowerCamelCase__ , lowerCamelCase__: str =self.get_timesteps(UpperCAmelCase_ , UpperCAmelCase_ , self.device) lowerCamelCase__: Optional[int] =timesteps[:1].repeat(UpperCAmelCase_) # 4. Prepare latent variables lowerCamelCase__: int =self.prepare_latents(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , self.unet.dtype , self.device , UpperCAmelCase_) lowerCamelCase__: Tuple =latents # 5. Denoising loop for t in self.progress_bar(UpperCAmelCase_): # 1. predict noise model_output lowerCamelCase__: Dict =self.unet(UpperCAmelCase_ , UpperCAmelCase_).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 lowerCamelCase__: Optional[int] =self.scheduler.step( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , eta=UpperCAmelCase_ , use_clipped_model_output=UpperCAmelCase_ , generator=UpperCAmelCase_ , ).prev_sample lowerCamelCase__: str =(image / 2 + 0.5).clamp(0 , 1) lowerCamelCase__: Optional[Any] =image.cpu().permute(0 , 2 , 3 , 1).numpy() if output_type == "pil": lowerCamelCase__: Dict =self.numpy_to_pil(UpperCAmelCase_) if not return_dict: return (image, latent_timestep.item()) return ImagePipelineOutput(images=UpperCAmelCase_)
59
0
"""simple docstring""" lowerCAmelCase: str =[ "Audio", "Array2D", "Array3D", "Array4D", "Array5D", "ClassLabel", "Features", "Sequence", "Value", "Image", "Translation", "TranslationVariableLanguages", ] from .audio import Audio from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value from .image import Image from .translation import Translation, TranslationVariableLanguages
607
import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 __A = data_utils.TransfoXLTokenizer __A = data_utils.TransfoXLCorpus __A = data_utils __A = data_utils def lowerCAmelCase_ ( __a , __a , __a , __a ) -> List[str]: """simple docstring""" if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(__a , "rb" ) as fp: lowerCamelCase__: Optional[Any] =pickle.load(__a , encoding="latin1" ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) lowerCamelCase__: Union[str, Any] =pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"] print(F"""Save vocabulary to {pytorch_vocab_dump_path}""" ) lowerCamelCase__: Any =corpus.vocab.__dict__ torch.save(__a , __a ) lowerCamelCase__: Dict =corpus.__dict__ corpus_dict_no_vocab.pop("vocab" , __a ) lowerCamelCase__: List[str] =pytorch_dump_folder_path + "/" + CORPUS_NAME print(F"""Save dataset to {pytorch_dataset_dump_path}""" ) torch.save(__a , __a ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model lowerCamelCase__: Optional[Any] =os.path.abspath(__a ) lowerCamelCase__: Dict =os.path.abspath(__a ) print(F"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" ) # Initialise PyTorch model if transfo_xl_config_file == "": lowerCamelCase__: int =TransfoXLConfig() else: lowerCamelCase__: Any =TransfoXLConfig.from_json_file(__a ) print(F"""Building PyTorch model from configuration: {config}""" ) lowerCamelCase__: List[Any] =TransfoXLLMHeadModel(__a ) lowerCamelCase__: List[str] =load_tf_weights_in_transfo_xl(__a , __a , __a ) # Save pytorch-model lowerCamelCase__: List[str] =os.path.join(__a , __a ) lowerCamelCase__: Tuple =os.path.join(__a , __a ) print(F"""Save PyTorch model to {os.path.abspath(__a )}""" ) torch.save(model.state_dict() , __a ) print(F"""Save configuration file to {os.path.abspath(__a )}""" ) with open(__a , "w" , encoding="utf-8" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the folder to store the PyTorch model or dataset/vocab.", ) parser.add_argument( "--tf_checkpoint_path", default="", type=str, help="An optional path to a TensorFlow checkpoint path to be converted.", ) parser.add_argument( "--transfo_xl_config_file", default="", type=str, help=( "An optional config json file corresponding to the pre-trained BERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--transfo_xl_dataset_file", default="", type=str, help="An optional dataset file to be converted in a vocabulary.", ) __A = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
59
0