Dataset schema:

    column                    type             min    max
    code                      string (length)  82     53.2k
    code_codestyle            int64            0      721
    style_context             string (length)  91     41.9k
    style_context_codestyle   int64            0      699
    label                     int64            0      1
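Each record that follows is one dataset row, with its five fields printed in column order: code, code_codestyle, style_context, style_context_codestyle, label. In the rows shown here, label is 1 exactly when code_codestyle equals style_context_codestyle, i.e. when the two snippets share a code style. (The final row is truncated before its last two fields.)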
import unittest

from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")


@require_sentencepiece
@require_tokenizers
class GPTSwaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSwaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2_000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2_000)

    def test_full_tokenizer(self):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."]
        )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]

        # fmt: off
        expected_encoding = {
            "input_ids": [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
            "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
            "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
        }
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="AI-Sweden/gpt-sw3-126m",
            sequences=sequences,
        )
116
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}


class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
378
0
"""simple docstring""" import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class UpperCamelCase_ : """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any]=1_3 , UpperCAmelCase__ : Optional[Any]=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : str=True , UpperCAmelCase__ : str=9_9 , UpperCAmelCase__ : List[Any]=3_2 , UpperCAmelCase__ : Any=5 , UpperCAmelCase__ : List[str]=4 , UpperCAmelCase__ : Dict=3_7 , UpperCAmelCase__ : str="gelu" , UpperCAmelCase__ : str=0.1 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : Union[str, Any]=5_0 , UpperCAmelCase__ : Dict=0.02 , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Optional[int]=None , ) -> Any: __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_input_mask __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = scope def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = None if self.use_input_mask: __SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = self.get_config() return config, input_ids, input_mask, token_labels def UpperCAmelCase_ ( self : Optional[Any] ) -> List[Any]: return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict: ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = self.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Union[str, 
Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Any , ) -> int: __SCREAMING_SNAKE_CASE = BertGenerationEncoder(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Optional[Any] , ) -> int: __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = BertGenerationEncoder(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , ) __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : int , ) -> int: __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = BertGenerationDecoder(config=UpperCAmelCase__ ).to(UpperCAmelCase__ ).eval() # first forward pass __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ , ) __SCREAMING_SNAKE_CASE = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size ) __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 ) __SCREAMING_SNAKE_CASE = torch.cat([input_mask, next_mask] , dim=-1 ) __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )["hidden_states"][0] __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )["hidden_states"][0] # select random slice __SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item() __SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach() __SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) ) def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple , *UpperCAmelCase__ : int , ) 
-> List[str]: __SCREAMING_SNAKE_CASE = BertGenerationDecoder(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ ( self : str ) -> List[Any]: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase): """simple docstring""" snake_case__ : List[str] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () snake_case__ : Dict = (BertGenerationDecoder,) if is_torch_available() else () snake_case__ : List[Any] = ( {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder} if is_torch_available() else {} ) def UpperCAmelCase_ ( self : str ) -> Dict: __SCREAMING_SNAKE_CASE = BertGenerationEncoderTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=3_7 ) def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]: self.config_tester.run_common_tests() def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> str: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE = "bert" self.model_tester.create_and_check_model(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Dict ) -> Tuple: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Optional[int] ) -> List[str]: # This regression test was failing with PyTorch < 1.3 ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() __SCREAMING_SNAKE_CASE = None self.model_tester.create_and_check_model_as_decoder( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*UpperCAmelCase__ ) @slow def UpperCAmelCase_ ( self : Any ) -> int: __SCREAMING_SNAKE_CASE = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) self.assertIsNotNone(UpperCAmelCase__ ) @require_torch class UpperCamelCase_ ( unittest.TestCase): """simple docstring""" @slow def UpperCAmelCase_ ( self : Any ) -> List[str]: __SCREAMING_SNAKE_CASE = 
BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) __SCREAMING_SNAKE_CASE = torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ )[0] __SCREAMING_SNAKE_CASE = torch.Size([1, 8, 1_0_2_4] ) self.assertEqual(output.shape , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = torch.tensor( [[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1E-4 ) ) @require_torch class UpperCamelCase_ ( unittest.TestCase): """simple docstring""" @slow def UpperCAmelCase_ ( self : str ) -> str: __SCREAMING_SNAKE_CASE = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) __SCREAMING_SNAKE_CASE = torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ )[0] __SCREAMING_SNAKE_CASE = torch.Size([1, 8, 5_0_3_5_8] ) self.assertEqual(output.shape , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = torch.tensor( [[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1E-4 ) )
715
"""simple docstring""" import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class UpperCamelCase_ : """simple docstring""" @staticmethod def UpperCAmelCase_ ( *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Optional[Any] ) -> Optional[int]: pass @is_pipeline_test @require_torch @require_vision class UpperCamelCase_ ( unittest.TestCase): """simple docstring""" snake_case__ : List[Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] ) -> str: __SCREAMING_SNAKE_CASE = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" ) __SCREAMING_SNAKE_CASE = [ { "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ), "question": "How many cats are there?", }, { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "question": "How many cats are there?", }, ] return vqa_pipeline, examples def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any ) -> Any: __SCREAMING_SNAKE_CASE = vqa_pipeline(UpperCAmelCase__ , top_k=1 ) self.assertEqual( UpperCAmelCase__ , [ [{"score": ANY(UpperCAmelCase__ ), "answer": ANY(UpperCAmelCase__ )}], [{"score": ANY(UpperCAmelCase__ ), "answer": ANY(UpperCAmelCase__ )}], ] , ) @require_torch def UpperCAmelCase_ ( self : Optional[Any] ) -> int: __SCREAMING_SNAKE_CASE = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" ) __SCREAMING_SNAKE_CASE = "./tests/fixtures/tests_samples/COCO/000000039769.png" __SCREAMING_SNAKE_CASE = "How many cats are there?" __SCREAMING_SNAKE_CASE = vqa_pipeline(image=UpperCAmelCase__ , question="How many cats are there?" , top_k=2 ) self.assertEqual( UpperCAmelCase__ , [{"score": ANY(UpperCAmelCase__ ), "answer": ANY(UpperCAmelCase__ )}, {"score": ANY(UpperCAmelCase__ ), "answer": ANY(UpperCAmelCase__ )}] ) __SCREAMING_SNAKE_CASE = vqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( UpperCAmelCase__ , [{"score": ANY(UpperCAmelCase__ ), "answer": ANY(UpperCAmelCase__ )}, {"score": ANY(UpperCAmelCase__ ), "answer": ANY(UpperCAmelCase__ )}] ) @slow @require_torch def UpperCAmelCase_ ( self : Tuple ) -> Any: __SCREAMING_SNAKE_CASE = pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" ) __SCREAMING_SNAKE_CASE = "./tests/fixtures/tests_samples/COCO/000000039769.png" __SCREAMING_SNAKE_CASE = "How many cats are there?" 
__SCREAMING_SNAKE_CASE = vqa_pipeline(image=UpperCAmelCase__ , question=UpperCAmelCase__ , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [{"score": 0.8_799, "answer": "2"}, {"score": 0.296, "answer": "1"}] ) __SCREAMING_SNAKE_CASE = vqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [{"score": 0.8_799, "answer": "2"}, {"score": 0.296, "answer": "1"}] ) __SCREAMING_SNAKE_CASE = vqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [[{"score": 0.8_799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2 , ) @require_tf @unittest.skip("Visual question answering not implemented in TF" ) def UpperCAmelCase_ ( self : str ) -> Optional[Any]: pass
553
0
def solution(min_total: int = 10**12) -> int:
    # Track the previous and current convergents of the continued fraction;
    # iterate until the total disc count exceeds min_total.
    prev_numerator = 1
    prev_denominator = 0

    numerator = 1
    denominator = 1

    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator

        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator

    return (denominator + 1) // 2


if __name__ == "__main__":
    print(f"{solution() = }")
664
from argparse import ArgumentParser

from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands


def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
63
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
709
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
230
0
import json
import os
import tempfile

from transformers.testing_utils import check_json_file_has_correct_format


class FeatureExtractionSavingTestMixin:
    test_cast_dtype = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
433
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    # Count the paths from the top-left to the bottom-right cell,
    # moving in the four cardinal directions and avoiding blocked cells (1).
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
433
1
"""simple docstring""" def lowercase_ ( _lowercase : str ): '''simple docstring''' if not all(x.isalpha() for x in string ): raise ValueError("String must only contain alphabetic characters." ) UpperCAmelCase : Optional[int] = sorted(string.lower() ) return len(_lowercase ) == len(set(_lowercase ) ) if __name__ == "__main__": snake_case_ : Tuple = input("""Enter a string """).strip() snake_case_ : str = is_isogram(input_str) print(f'''{input_str} is {"an" if isogram else "not an"} isogram.''')
292
"""simple docstring""" import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowercase_ ( _lowercase : Optional[int] , _lowercase : int ): '''simple docstring''' assert isinstance(_lowercase , _lowercase ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowercase_ ( _lowercase : Union[str, Any] , _lowercase : Optional[int] , _lowercase : Dict ): '''simple docstring''' UpperCAmelCase : Dict = tmp_path / "cache" UpperCAmelCase : str = {"text": "string"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): UpperCAmelCase : Tuple = TextDatasetReader(_lowercase , cache_dir=_lowercase , keep_in_memory=_lowercase ).read() _check_text_dataset(_lowercase , _lowercase ) @pytest.mark.parametrize( "features" , [ None, {"text": "string"}, {"text": "int32"}, {"text": "float32"}, ] , ) def lowercase_ ( _lowercase : Dict , _lowercase : Union[str, Any] , _lowercase : Optional[int] ): '''simple docstring''' UpperCAmelCase : int = tmp_path / "cache" UpperCAmelCase : Any = {"text": "string"} UpperCAmelCase : List[str] = features.copy() if features else default_expected_features UpperCAmelCase : Union[str, Any] = ( Features({feature: Value(_lowercase ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCAmelCase : str = TextDatasetReader(_lowercase , features=_lowercase , cache_dir=_lowercase ).read() _check_text_dataset(_lowercase , _lowercase ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowercase_ ( _lowercase : int , _lowercase : Tuple , _lowercase : List[str] ): '''simple docstring''' UpperCAmelCase : str = tmp_path / "cache" UpperCAmelCase : Any = {"text": "string"} UpperCAmelCase : List[Any] = TextDatasetReader(_lowercase , cache_dir=_lowercase , split=_lowercase ).read() _check_text_dataset(_lowercase , _lowercase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list] ) def lowercase_ ( _lowercase : Optional[int] , _lowercase : Any , _lowercase : Union[str, Any] ): '''simple docstring''' if issubclass(_lowercase , _lowercase ): UpperCAmelCase : List[str] = text_path elif issubclass(_lowercase , _lowercase ): UpperCAmelCase : List[Any] = [text_path] UpperCAmelCase : Union[str, Any] = tmp_path / "cache" UpperCAmelCase : List[Any] = {"text": "string"} UpperCAmelCase : Tuple = TextDatasetReader(_lowercase , cache_dir=_lowercase ).read() _check_text_dataset(_lowercase , _lowercase ) def lowercase_ ( _lowercase : Dict , _lowercase : int , _lowercase : Optional[Any]=("train",) ): '''simple docstring''' assert isinstance(_lowercase , _lowercase ) for split in splits: UpperCAmelCase : str = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowercase_ ( _lowercase : Dict , _lowercase : str , _lowercase : Dict ): '''simple docstring''' UpperCAmelCase : str = tmp_path / "cache" UpperCAmelCase : Union[str, 
Any] = {"text": "string"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): UpperCAmelCase : Tuple = TextDatasetReader({"train": text_path} , cache_dir=_lowercase , keep_in_memory=_lowercase ).read() _check_text_datasetdict(_lowercase , _lowercase ) @pytest.mark.parametrize( "features" , [ None, {"text": "string"}, {"text": "int32"}, {"text": "float32"}, ] , ) def lowercase_ ( _lowercase : List[Any] , _lowercase : Union[str, Any] , _lowercase : int ): '''simple docstring''' UpperCAmelCase : int = tmp_path / "cache" # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" UpperCAmelCase : List[str] = {"text": "string"} UpperCAmelCase : str = features.copy() if features else default_expected_features UpperCAmelCase : Optional[int] = ( Features({feature: Value(_lowercase ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCAmelCase : Optional[int] = TextDatasetReader({"train": text_path} , features=_lowercase , cache_dir=_lowercase ).read() _check_text_datasetdict(_lowercase , _lowercase ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowercase_ ( _lowercase : int , _lowercase : Tuple , _lowercase : Optional[Any] ): '''simple docstring''' if split: UpperCAmelCase : Optional[Any] = {split: text_path} else: UpperCAmelCase : str = "train" UpperCAmelCase : Union[str, Any] = {"train": text_path, "test": text_path} UpperCAmelCase : Any = tmp_path / "cache" UpperCAmelCase : List[Any] = {"text": "string"} UpperCAmelCase : Any = TextDatasetReader(_lowercase , cache_dir=_lowercase ).read() _check_text_datasetdict(_lowercase , _lowercase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
292
1
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow

from ..bert.test_tokenization_bert import BertTokenizationTest


@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
54
from __future__ import annotations

import os
from typing import Any

import requests

BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()


if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(f"{key}: {value}")
    else:
        raise ValueError("'USER_TOKEN' field cannot be empty.")
511
0
from __future__ import annotations


def kmp(pattern: str, text: str) -> bool:
    # 1) Construct the failure array
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
297
import argparse

import torch

from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
297
1
from __future__ import annotations

from math import pow, sqrt


def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    # Given any two of resistance, reactance, and impedance (the third passed
    # as 0), solve Z^2 = R^2 + X^2 for the missing quantity.
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
64
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
509
0
from ..utils import DummyObject, requires_backends


# Dummy placeholder raised when torch/torchsde are missing; the class name is
# inferred from the diffusers dummy-objects convention for this backend pair.
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
703
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-sat-base-100h-libri-ft": (
        "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
    ),
    # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}


class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
46
0
import itertools
from dataclasses import dataclass
from typing import Optional

import pandas as pd
import pyarrow as pa

import datasets
from datasets.table import table_cast


@dataclass
class PandasConfig(datasets.BuilderConfig):
    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table):
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
                yield i, self._cast_table(pa_table)
476
import unittest

from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch


class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
        "artist": "Zac Brown Band",
        "genres": "Country",
        "lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
    }

    @require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
            torch.tensor([[
                0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76
            ]]),
            torch.tensor([[0, 0, 0, 1069, 11]]),
            torch.tensor([[0, 0, 0, 1069, 11]]),
        ]
        # fmt: on

        self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))

    @require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
            torch.tensor([[
                0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77
            ]]),
            torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]]),
            torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]]),
        ]
        # fmt: on

        self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
476
1
from __future__ import annotations

from collections import deque


class Automaton:
    """Aho-Corasick automaton for multi-keyword string search."""

    def __init__(self, keywords: list[str]) -> None:
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
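A quick usage sketch for the Automaton class above; the keywords and search string are illustrative, and the printed result follows from the algorithm (start indices of each keyword occurrence).

# Illustrative example, not part of the original file.
automaton = Automaton(["what", "hat", "ver", "er"])
print(automaton.search_in("whatever, err ... , wherever"))
# -> {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}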
704
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """
    Count the unique simple paths from (row, col) to the bottom-right cell of
    ``grid``, moving up, down, left, and right; cells equal to 1 are blocked.
    """
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
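A small usage sketch for depth_first_search; the grid below is illustrative. The call counts simple paths from the top-left to the bottom-right cell, treating 1-cells as walls.

# Illustrative 4x4 grid; 1 marks a blocked cell.
maze = [
    [0, 0, 0, 0],
    [1, 1, 0, 0],
    [0, 0, 0, 1],
    [0, 1, 0, 0],
]
# Start the search at the top-left corner with an empty visited set.
print(depth_first_search(maze, 0, 0, set()))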
266
0
import gc
import unittest

import numpy as np
import torch

from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0_447, 0.0_492, 0.0_468, 0.0_408, 0.0_383, 0.0_408, 0.0_354, 0.0_380, 0.0_339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1_237, 0.1_320, 0.1_438, 0.1_359, 0.1_390, 0.1_132, 0.1_277, 0.1_175, 0.1_112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11_381_689, 0.12_112_921, 0.1_389_457, 0.12_549_606, 0.1_244_964, 0.10_831_517, 0.11_562_866, 0.10_867_816, 0.10_499_048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
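For reference, a stand-alone sketch of the pipeline call the first test above exercises; the model id, scheduler name, and sampler settings come from the test itself, while the CUDA device and output filename are assumptions.

# Minimal sketch, assuming a CUDA GPU is available; not part of the test file.
import torch
from diffusers import StableDiffusionKDiffusionPipeline

pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")
pipe.set_scheduler("sample_euler")  # k-diffusion sampler name, as used in the test
image = pipe(
    "A painting of a squirrel eating a burger",
    generator=torch.manual_seed(0),
    guidance_scale=9.0,
    num_inference_steps=20,
).images[0]
image.save("squirrel.png")  # hypothetical output path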
84
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING A_: Optional[Any] = logging.get_logger(__name__) A_: Union[str, Any] = { 'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json', # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr } class _lowercase ( _UpperCAmelCase ): """simple docstring""" lowerCAmelCase__ = 'deformable_detr' lowerCAmelCase__ = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=3 , UpperCAmelCase=300 , UpperCAmelCase=1024 , UpperCAmelCase=6 , UpperCAmelCase=1024 , UpperCAmelCase=8 , UpperCAmelCase=6 , UpperCAmelCase=1024 , UpperCAmelCase=8 , UpperCAmelCase=0.0 , UpperCAmelCase=True , UpperCAmelCase="relu" , UpperCAmelCase=256 , UpperCAmelCase=0.1 , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.02 , UpperCAmelCase=1.0 , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase="sine" , UpperCAmelCase="resnet50" , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=4 , UpperCAmelCase=4 , UpperCAmelCase=4 , UpperCAmelCase=False , UpperCAmelCase=300 , UpperCAmelCase=False , UpperCAmelCase=1 , UpperCAmelCase=5 , UpperCAmelCase=2 , UpperCAmelCase=1 , UpperCAmelCase=1 , UpperCAmelCase=5 , UpperCAmelCase=2 , UpperCAmelCase=0.1 , UpperCAmelCase=0.25 , UpperCAmelCase=False , **UpperCAmelCase , ): '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" ) if not use_timm_backbone: if backbone_config is None: logger.info("""`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.""" ) _lowercase = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] ) elif isinstance(UpperCAmelCase , UpperCAmelCase ): _lowercase = backbone_config.get("""model_type""" ) _lowercase = CONFIG_MAPPING[backbone_model_type] _lowercase = config_class.from_dict(UpperCAmelCase ) _lowercase = use_timm_backbone _lowercase = backbone_config _lowercase = num_channels _lowercase = num_queries _lowercase = max_position_embeddings _lowercase = d_model _lowercase = encoder_ffn_dim _lowercase = encoder_layers _lowercase = encoder_attention_heads _lowercase = decoder_ffn_dim _lowercase = decoder_layers _lowercase = decoder_attention_heads _lowercase = dropout _lowercase = attention_dropout _lowercase = activation_dropout _lowercase = activation_function _lowercase = init_std _lowercase = init_xavier_std _lowercase = encoder_layerdrop _lowercase = auxiliary_loss _lowercase = position_embedding_type _lowercase = backbone _lowercase = use_pretrained_backbone _lowercase = dilation # deformable attributes _lowercase = num_feature_levels _lowercase = encoder_n_points _lowercase = decoder_n_points _lowercase = two_stage _lowercase = two_stage_num_proposals _lowercase = with_box_refine if two_stage is True and with_box_refine is False: raise ValueError("""If two_stage is True, with_box_refine must be True.""" ) # Hungarian matcher _lowercase = class_cost _lowercase = bbox_cost _lowercase = giou_cost # Loss coefficients _lowercase = mask_loss_coefficient _lowercase = dice_loss_coefficient _lowercase = bbox_loss_coefficient _lowercase = giou_loss_coefficient _lowercase = eos_coefficient _lowercase = focal_alpha _lowercase = disable_custom_kernels super().__init__(is_encoder_decoder=UpperCAmelCase , **UpperCAmelCase ) @property def _UpperCAmelCase ( self ): '''simple docstring''' return self.encoder_attention_heads @property def _UpperCAmelCase ( self ): '''simple docstring''' return self.d_model def _UpperCAmelCase ( self ): '''simple docstring''' _lowercase = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: _lowercase = self.backbone_config.to_dict() _lowercase = self.__class__.model_type return output
398
0
import logging import re import pytorch_quantization import pytorch_quantization.nn as quant_nn import torch from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor a__ = logging.getLogger(__name__) a__ = 5_0 # max width of layer names a__ = 7_0 # max width of quantizer names def _UpperCAmelCase ( a : Optional[Any] ): snake_case__ = parser.add_argument_group("""quant_trainer arguments""" ) group.add_argument("""--wprec""" , type=a , default=8 , help="""weight precision""" ) group.add_argument("""--aprec""" , type=a , default=8 , help="""activation precision""" ) group.add_argument("""--quant-per-tensor""" , action="""store_true""" , help="""per tensor weight scaling""" ) group.add_argument("""--quant-disable""" , action="""store_true""" , help="""disable all quantizers""" ) group.add_argument("""--quant-disable-embeddings""" , action="""store_true""" , help="""disable all embeddings quantizers""" ) group.add_argument("""--quant-disable-keyword""" , type=a , nargs="""+""" , help="""disable quantizers by keyword""" ) group.add_argument("""--quant-disable-layer-module""" , type=a , help="""disable quantizers by keyword under layer.""" ) group.add_argument("""--quant-enable-layer-module""" , type=a , help="""enable quantizers by keyword under layer""" ) group.add_argument("""--calibrator""" , default="""max""" , help="""which quantization range calibrator to use""" ) group.add_argument("""--percentile""" , default=a , type=a , help="""percentile for PercentileCalibrator""" ) group.add_argument("""--fuse-qkv""" , action="""store_true""" , help="""use the same scale factor for qkv""" ) group.add_argument("""--clip-gelu""" , metavar="""N""" , type=a , help="""clip gelu output maximum value to N""" ) group.add_argument( """--recalibrate-weights""" , action="""store_true""" , help=( """recalibrate weight amaxes by taking the max of the weights.""" """ amaxes will be computed with the current quantization granularity (axis).""" ) , ) def _UpperCAmelCase ( a : List[Any] ): if args.calibrator == "max": snake_case__ = """max""" elif args.calibrator == "percentile": if args.percentile is None: raise ValueError("""Specify --percentile when using percentile calibrator""" ) snake_case__ = """histogram""" elif args.calibrator == "mse": snake_case__ = """histogram""" else: raise ValueError(F'''Invalid calibrator {args.calibrator}''' ) snake_case__ = QuantDescriptor(num_bits=args.aprec , calib_method=a ) snake_case__ = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) ) quant_nn.QuantLinear.set_default_quant_desc_input(a ) quant_nn.QuantLinear.set_default_quant_desc_weight(a ) def _UpperCAmelCase ( a : Dict , a : Union[str, Any] , a : Dict=False , a : int=False ): logger.info("""Configuring Model for Quantization""" ) logger.info(F'''using quantization package {pytorch_quantization.__file__}''' ) if not calib: if args.quant_disable_embeddings: set_quantizer_by_name(a , ["""embeddings"""] , which="""weight""" , _disabled=a ) if args.quant_disable: set_quantizer_by_name(a , [""""""] , _disabled=a ) if args.quant_disable_keyword: set_quantizer_by_name(a , args.quant_disable_keyword , _disabled=a ) if args.quant_disable_layer_module: set_quantizer_by_name(a , [r"""layer.\d+.""" + args.quant_disable_layer_module] , _disabled=a ) if args.quant_enable_layer_module: set_quantizer_by_name(a , [r"""layer.\d+.""" + args.quant_enable_layer_module] , _disabled=a ) if args.recalibrate_weights: recalibrate_weights(a ) if args.fuse_qkv: 
fuse_qkv(a , a ) if args.clip_gelu: clip_gelu(a , args.clip_gelu ) # if args.local_rank in [-1, 0] and not calib: print_quant_summary(a ) def _UpperCAmelCase ( a : Any ): logger.info("""Enabling Calibration""" ) for name, module in model.named_modules(): if name.endswith("""_quantizer""" ): if module._calibrator is not None: module.disable_quant() module.enable_calib() else: module.disable() logger.info(F'''{name:80}: {module}''' ) def _UpperCAmelCase ( a : int , a : Dict ): logger.info("""Loading calibrated amax""" ) for name, module in model.named_modules(): if name.endswith("""_quantizer""" ): if module._calibrator is not None: if isinstance(module._calibrator , calib.MaxCalibrator ): module.load_calib_amax() else: module.load_calib_amax("""percentile""" , percentile=args.percentile ) module.enable_quant() module.disable_calib() else: module.enable() model.cuda() print_quant_summary(a ) def _UpperCAmelCase ( a : Dict , a : Dict ): def fusea(a : Dict , a : Union[str, Any] , a : Union[str, Any] ): for mod in [qq, qk, qv]: if not hasattr(a , """_amax""" ): print(""" WARNING: NO AMAX BUFFER""" ) return snake_case__ = qq._amax.detach().item() snake_case__ = qk._amax.detach().item() snake_case__ = qv._amax.detach().item() snake_case__ = max(a , a , a ) qq._amax.fill_(a ) qk._amax.fill_(a ) qv._amax.fill_(a ) logger.info(F''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' ) for name, mod in model.named_modules(): if name.endswith(""".attention.self""" ): logger.info(F'''FUSE_QKV: {name:{name_width}}''' ) fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer ) if args.quant_per_tensor: fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer ) def _UpperCAmelCase ( a : List[Any] , a : str ): for name, mod in model.named_modules(): if name.endswith(""".output.dense""" ) and not name.endswith("""attention.output.dense""" ): snake_case__ = mod._input_quantizer._amax.data.detach().item() mod._input_quantizer._amax.data.detach().clamp_(max=a ) snake_case__ = mod._input_quantizer._amax.data.detach().item() logger.info(F'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' ) def _UpperCAmelCase ( a : str ): for name, mod in model.named_modules(): if hasattr(a , """_weight_quantizer""" ) and mod._weight_quantizer.axis is not None: snake_case__ = mod.weight.shape[0] snake_case__ = mod._weight_quantizer._amax.detach() snake_case__ = torch.ones(a , dtype=amax.dtype , device=amax.device ) * amax print(F'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' ) def _UpperCAmelCase ( a : Dict ): for name, mod in model.named_modules(): if hasattr(a , """_weight_quantizer""" ): if not hasattr(mod.weight_quantizer , """_amax""" ): print("""RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER""" ) continue # determine which axes to reduce across # e.g. 
a 4D tensor quantized per axis 0 should reduce over (1,2,3) snake_case__ = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis ) snake_case__ = set(range(len(mod.weight.size() ) ) ) - axis_set snake_case__ = pytorch_quantization.utils.reduce_amax(mod.weight , axis=a , keepdims=a ).detach() logger.info(F'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' ) snake_case__ = amax def _UpperCAmelCase ( a : Tuple , a : str=25 , a : List[str]=180 , a : Tuple=None ): if ignore is None: snake_case__ = [] elif not isinstance(a , a ): snake_case__ = [ignore] snake_case__ = 0 for name, mod in model.named_modules(): if not hasattr(a , """weight""" ): continue snake_case__ = max(a , len(a ) ) for name, mod in model.named_modules(): snake_case__ = getattr(a , """_input_quantizer""" , a ) snake_case__ = getattr(a , """_weight_quantizer""" , a ) if not hasattr(a , """weight""" ): continue if type(a ) in ignore: continue if [True for s in ignore if type(a ) is str and s in name]: continue snake_case__ = F'''Act:{input_q.extra_repr()}''' snake_case__ = F'''Wgt:{weight_q.extra_repr()}''' snake_case__ = F'''{name:{name_width}} {act_str} {wgt_str}''' if len(a ) <= line_width: logger.info(a ) else: logger.info(F'''{name:{name_width}} {act_str}''' ) logger.info(F'''{' ':{name_width}} {wgt_str}''' ) def _UpperCAmelCase ( a : List[Any] ): snake_case__ = 0 for name, mod in model.named_modules(): if isinstance(a , pytorch_quantization.nn.TensorQuantizer ): print(F'''{name:80} {mod}''' ) count += 1 print(F'''{count} TensorQuantizers found in model''' ) def _UpperCAmelCase ( a : List[Any] , a : int , a : Optional[int] , a : Any , a : Optional[Any] ): snake_case__ = getattr(a , a , a ) if quantizer_mod is not None: assert hasattr(a , a ) setattr(a , a , a ) else: logger.warning(F'''{name} has no {quantizer}''' ) def _UpperCAmelCase ( a : List[Any] , a : Optional[Any] , a : Optional[Any]="both" , **a : Any ): snake_case__ = F'''Warning: changing {which} quantizers of {name:{qname_width}}''' for k, v in kwargs.items(): s += F''' {k}={v}''' if which in ["input", "both"]: set_quantizer(a , a , """_input_quantizer""" , a , a ) if which in ["weight", "both"]: set_quantizer(a , a , """_weight_quantizer""" , a , a ) logger.info(a ) def _UpperCAmelCase ( a : str , a : Union[str, Any] , **a : List[Any] ): for name, mod in model.named_modules(): if hasattr(a , """_input_quantizer""" ) or hasattr(a , """_weight_quantizer""" ): for n in names: if re.search(a , a ): set_quantizers(a , a , **a ) elif name.endswith("""_quantizer""" ): for n in names: if re.search(a , a ): snake_case__ = F'''Warning: changing {name:{name_width}}''' for k, v in kwargs.items(): s += F''' {k}={v}''' setattr(a , a , a ) logger.info(a )
99
import unittest

import torch

from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


enable_full_determinism()


class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.01_53, -0.40_44, -0.18_80, -0.51_61, -0.24_18, -0.40_72, -0.16_12, -0.06_33, -0.01_43])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
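A minimal stand-alone sketch of the quantize-then-decode forward pass these tests cover, reusing the same dummy checkpoint; it assumes the Hub checkpoint "fusing/vqgan-dummy" is reachable.

# Minimal sketch; not part of the test file.
import torch
from diffusers import VQModel

model = VQModel.from_pretrained("fusing/vqgan-dummy").eval()
sample = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
with torch.no_grad():
    reconstruction = model(sample).sample  # encode, vector-quantize, then decode
print(reconstruction.shape)  # same spatial shape as the input sample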
99
1
import math import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from .attention_processor import Attention from .embeddings import get_timestep_embedding from .modeling_utils import ModelMixin class _lowerCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): """simple docstring""" @register_to_config def __init__( self , UpperCAmelCase = 128 , UpperCAmelCase = 256 , UpperCAmelCase = 2_000.0 , UpperCAmelCase = 768 , UpperCAmelCase = 12 , UpperCAmelCase = 12 , UpperCAmelCase = 64 , UpperCAmelCase = 2048 , UpperCAmelCase = 0.1 , ) -> Dict: '''simple docstring''' super().__init__() __snake_case : Optional[Any] = nn.Sequential( nn.Linear(_lowercase , d_model * 4 , bias=_lowercase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=_lowercase ) , nn.SiLU() , ) __snake_case : Any = nn.Embedding(_lowercase , _lowercase ) __snake_case : Union[str, Any] = False __snake_case : List[Any] = nn.Linear(_lowercase , _lowercase , bias=_lowercase ) __snake_case : Union[str, Any] = nn.Dropout(p=_lowercase ) __snake_case : Tuple = nn.ModuleList() for lyr_num in range(_lowercase ): # FiLM conditional T5 decoder __snake_case : Union[str, Any] = DecoderLayer(d_model=_lowercase , d_kv=_lowercase , num_heads=_lowercase , d_ff=_lowercase , dropout_rate=_lowercase ) self.decoders.append(_lowercase ) __snake_case : List[Any] = TaLayerNorm(_lowercase ) __snake_case : Optional[Any] = nn.Dropout(p=_lowercase ) __snake_case : List[Any] = nn.Linear(_lowercase , _lowercase , bias=_lowercase ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' __snake_case : Optional[int] = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) ) return mask.unsqueeze(-3 ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]: '''simple docstring''' __snake_case : str = decoder_input_tokens.shape assert decoder_noise_time.shape == (batch,) # decoder_noise_time is in [0, 1), so rescale to expected timing range. __snake_case : Optional[int] = get_timestep_embedding( decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype ) __snake_case : int = self.conditioning_emb(_lowercase ).unsqueeze(1 ) assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4) __snake_case : Tuple = decoder_input_tokens.shape[1] # If we want to use relative positions for audio context, we can just offset # this sequence by the length of encodings_and_masks. __snake_case : Dict = torch.broadcast_to( torch.arange(_lowercase , device=decoder_input_tokens.device ) , (batch, seq_length) , ) __snake_case : Tuple = self.position_encoding(_lowercase ) __snake_case : Optional[Any] = self.continuous_inputs_projection(_lowercase ) inputs += position_encodings __snake_case : List[Any] = self.dropout(_lowercase ) # decoder: No padding present. __snake_case : Tuple = torch.ones( decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype ) # Translate encoding masks to encoder-decoder masks. 
__snake_case : int = [(x, self.encoder_decoder_mask(_lowercase , _lowercase )) for x, y in encodings_and_masks] # cross attend style: concat encodings __snake_case : Optional[Any] = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 ) __snake_case : str = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 ) for lyr in self.decoders: __snake_case : int = lyr( _lowercase , conditioning_emb=_lowercase , encoder_hidden_states=_lowercase , encoder_attention_mask=_lowercase , )[0] __snake_case : int = self.decoder_norm(_lowercase ) __snake_case : Union[str, Any] = self.post_dropout(_lowercase ) __snake_case : int = self.spec_out(_lowercase ) return spec_out class _lowerCamelCase ( nn.Module ): """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=1E-6 ) -> Union[str, Any]: '''simple docstring''' super().__init__() __snake_case : Any = nn.ModuleList() # cond self attention: layer 0 self.layer.append( TaLayerSelfAttentionCond(d_model=_lowercase , d_kv=_lowercase , num_heads=_lowercase , dropout_rate=_lowercase ) ) # cross attention: layer 1 self.layer.append( TaLayerCrossAttention( d_model=_lowercase , d_kv=_lowercase , num_heads=_lowercase , dropout_rate=_lowercase , layer_norm_epsilon=_lowercase , ) ) # Film Cond MLP + dropout: last layer self.layer.append( TaLayerFFCond(d_model=_lowercase , d_ff=_lowercase , dropout_rate=_lowercase , layer_norm_epsilon=_lowercase ) ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , ) -> List[Any]: '''simple docstring''' __snake_case : Tuple = self.layer[0]( _lowercase , conditioning_emb=_lowercase , attention_mask=_lowercase , ) if encoder_hidden_states is not None: __snake_case : Tuple = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to( encoder_hidden_states.dtype ) __snake_case : str = self.layer[1]( _lowercase , key_value_states=_lowercase , attention_mask=_lowercase , ) # Apply Film Conditional Feed Forward layer __snake_case : Any = self.layer[-1](_lowercase , _lowercase ) return (hidden_states,) class _lowerCamelCase ( nn.Module ): """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str: '''simple docstring''' super().__init__() __snake_case : Any = TaLayerNorm(_lowercase ) __snake_case : List[Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=_lowercase ) __snake_case : Union[str, Any] = Attention(query_dim=_lowercase , heads=_lowercase , dim_head=_lowercase , out_bias=_lowercase , scale_qk=_lowercase ) __snake_case : List[Any] = nn.Dropout(_lowercase ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , ) -> Optional[Any]: '''simple docstring''' __snake_case : Dict = self.layer_norm(_lowercase ) if conditioning_emb is not None: __snake_case : str = self.FiLMLayer(_lowercase , _lowercase ) # Self-attention block __snake_case : List[Any] = self.attention(_lowercase ) __snake_case : List[str] = hidden_states + self.dropout(_lowercase ) return hidden_states class _lowerCamelCase ( nn.Module ): """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]: '''simple docstring''' super().__init__() __snake_case : List[Any] = Attention(query_dim=_lowercase , heads=_lowercase , dim_head=_lowercase , out_bias=_lowercase , scale_qk=_lowercase ) __snake_case : 
Union[str, Any] = TaLayerNorm(_lowercase , eps=_lowercase ) __snake_case : Optional[Any] = nn.Dropout(_lowercase ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , ) -> Optional[int]: '''simple docstring''' __snake_case : List[Any] = self.layer_norm(_lowercase ) __snake_case : Optional[Any] = self.attention( _lowercase , encoder_hidden_states=_lowercase , attention_mask=attention_mask.squeeze(1 ) , ) __snake_case : Any = hidden_states + self.dropout(_lowercase ) return layer_output class _lowerCamelCase ( nn.Module ): """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict: '''simple docstring''' super().__init__() __snake_case : Tuple = TaDenseGatedActDense(d_model=_lowercase , d_ff=_lowercase , dropout_rate=_lowercase ) __snake_case : List[Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=_lowercase ) __snake_case : Optional[int] = TaLayerNorm(_lowercase , eps=_lowercase ) __snake_case : Tuple = nn.Dropout(_lowercase ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase=None ) -> str: '''simple docstring''' __snake_case : List[Any] = self.layer_norm(_lowercase ) if conditioning_emb is not None: __snake_case : Optional[int] = self.film(_lowercase , _lowercase ) __snake_case : int = self.DenseReluDense(_lowercase ) __snake_case : Optional[Any] = hidden_states + self.dropout(_lowercase ) return hidden_states class _lowerCamelCase ( nn.Module ): """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]: '''simple docstring''' super().__init__() __snake_case : Optional[int] = nn.Linear(_lowercase , _lowercase , bias=_lowercase ) __snake_case : Optional[int] = nn.Linear(_lowercase , _lowercase , bias=_lowercase ) __snake_case : Any = nn.Linear(_lowercase , _lowercase , bias=_lowercase ) __snake_case : int = nn.Dropout(_lowercase ) __snake_case : Optional[int] = NewGELUActivation() def UpperCAmelCase ( self , UpperCAmelCase ) -> int: '''simple docstring''' __snake_case : str = self.act(self.wi_a(_lowercase ) ) __snake_case : Dict = self.wi_a(_lowercase ) __snake_case : Any = hidden_gelu * hidden_linear __snake_case : List[Any] = self.dropout(_lowercase ) __snake_case : Tuple = self.wo(_lowercase ) return hidden_states class _lowerCamelCase ( nn.Module ): """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase=1E-6 ) -> str: '''simple docstring''' super().__init__() __snake_case : Union[str, Any] = nn.Parameter(torch.ones(_lowercase ) ) __snake_case : int = eps def UpperCAmelCase ( self , UpperCAmelCase ) -> List[Any]: '''simple docstring''' __snake_case : Tuple = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=_lowercase ) __snake_case : Any = hidden_states * torch.rsqrt(variance + self.variance_epsilon ) # convert into half-precision if necessary if self.weight.dtype in [torch.floataa, torch.bfloataa]: __snake_case : str = hidden_states.to(self.weight.dtype ) return self.weight * hidden_states class _lowerCamelCase ( nn.Module ): """simple docstring""" def UpperCAmelCase ( self , UpperCAmelCase ) -> torch.Tensor: '''simple docstring''' return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(_lowercase , 3.0 )) )) class _lowerCamelCase ( nn.Module ): """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase ) -> Any: '''simple docstring''' super().__init__() __snake_case : List[Any] = nn.Linear(_lowercase , out_features * 2 , 
bias=_lowercase ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase ) -> List[Any]: '''simple docstring''' __snake_case : List[Any] = self.scale_bias(_lowercase ) __snake_case : Any = torch.chunk(_lowercase , 2 , -1 ) __snake_case : Optional[Any] = x * (1 + scale) + shift return x
243
"""simple docstring""" # Logistic Regression from scratch # In[62]: # In[63]: # importing all the required libraries import numpy as np from matplotlib import pyplot as plt from sklearn import datasets def __lowerCAmelCase ( __UpperCamelCase : int ): '''simple docstring''' return 1 / (1 + np.exp(-z )) def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : int ): '''simple docstring''' return (-y * np.log(__UpperCamelCase ) - (1 - y) * np.log(1 - h )).mean() def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict ): '''simple docstring''' snake_case_ : Optional[int] = np.dot(__UpperCamelCase , __UpperCamelCase ) return np.sum(y * scores - np.log(1 + np.exp(__UpperCamelCase ) ) ) def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int=7_0_0_0_0 ): '''simple docstring''' snake_case_ : Dict = np.zeros(x.shape[1] ) for iterations in range(__UpperCamelCase ): snake_case_ : Any = np.dot(__UpperCamelCase , __UpperCamelCase ) snake_case_ : List[str] = sigmoid_function(__UpperCamelCase ) snake_case_ : Optional[Any] = np.dot(x.T , h - y ) / y.size snake_case_ : str = theta - alpha * gradient # updating the weights snake_case_ : int = np.dot(__UpperCamelCase , __UpperCamelCase ) snake_case_ : List[str] = sigmoid_function(__UpperCamelCase ) snake_case_ : Dict = cost_function(__UpperCamelCase , __UpperCamelCase ) if iterations % 1_0_0 == 0: print(F'loss: {j} \t' ) # printing the loss after every 100 iterations return theta # In[68]: if __name__ == "__main__": __lowerCAmelCase : Any = datasets.load_iris() __lowerCAmelCase : List[Any] = iris.data[:, :2] __lowerCAmelCase : Tuple = (iris.target != 0) * 1 __lowerCAmelCase : Any = 0.1 __lowerCAmelCase : List[Any] = logistic_reg(alpha, x, y, max_iterations=7_0000) print('''theta: ''', theta) # printing the theta i.e our weights vector def __lowerCAmelCase ( __UpperCamelCase : List[str] ): '''simple docstring''' return sigmoid_function( np.dot(__UpperCamelCase , __UpperCamelCase ) ) # predicting the value of probability from the logistic regression algorithm plt.figure(figsize=(10, 6)) plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''') plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''') ((__lowerCAmelCase) , (__lowerCAmelCase)) : Union[str, Any] = (x[:, 0].min(), x[:, 0].max()) ((__lowerCAmelCase) , (__lowerCAmelCase)) : Tuple = (x[:, 1].min(), x[:, 1].max()) ((__lowerCAmelCase) , (__lowerCAmelCase)) : Optional[Any] = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max)) __lowerCAmelCase : Any = np.c_[xxa.ravel(), xxa.ravel()] __lowerCAmelCase : Optional[int] = predict_prob(grid).reshape(xxa.shape) plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='''black''') plt.legend() plt.show()
58
0
"""simple docstring""" import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets SCREAMING_SNAKE_CASE_ = ''' @inproceedings{xu-etal-2016-optimizing, title = {Optimizing Statistical Machine Translation for Text Simplification}, authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris}, journal = {Transactions of the Association for Computational Linguistics}, volume = {4}, year={2016}, url = {https://www.aclweb.org/anthology/Q16-1029}, pages = {401--415 }, @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' SCREAMING_SNAKE_CASE_ = '''\ WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU It can be used to evaluate the quality of machine-generated texts. ''' SCREAMING_SNAKE_CASE_ = ''' Calculates sari score (between 0 and 100) given a list of source and predicted sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score. Args: sources: list of source sentences where each sentence should be a string. predictions: list of predicted sentences where each sentence should be a string. references: list of lists of reference sentences where each sentence should be a string. Returns: sari: sari score sacrebleu: sacrebleu score exact: exact score Examples: >>> sources=["About 95 species are currently accepted ."] >>> predictions=["About 95 you now get in ."] >>> references=[["About 95 species are currently known ."]] >>> wiki_split = datasets.load_metric("wiki_split") >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references) >>> print(results) {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0} ''' def A__ ( A__ ) -> int: '''simple docstring''' def remove_articles(A__ ): _UpperCAmelCase = re.compile(r"\b(a|an|the)\b" , re.UNICODE ) return re.sub(A__ , " " , A__ ) def white_space_fix(A__ ): return " ".join(text.split() ) def remove_punc(A__ ): _UpperCAmelCase = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(A__ ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(A__ ) ) ) ) def A__ ( A__ , A__ ) -> Optional[int]: '''simple docstring''' return int(normalize_answer(A__ ) == normalize_answer(A__ ) ) def A__ ( A__ , A__ ) -> Optional[Any]: '''simple docstring''' _UpperCAmelCase = [any(compute_exact(A__ , A__ ) for ref in refs ) for pred, refs in zip(A__ , A__ )] return (sum(A__ ) / len(A__ )) * 100 def A__ ( A__ , A__ , A__ , A__ ) -> List[Any]: '''simple docstring''' _UpperCAmelCase = [rgram for rgrams in rgramslist for rgram in rgrams] _UpperCAmelCase = Counter(A__ ) _UpperCAmelCase = Counter(A__ ) _UpperCAmelCase = Counter() for sgram, scount in sgramcounter.items(): _UpperCAmelCase = scount * numref _UpperCAmelCase = Counter(A__ ) _UpperCAmelCase = Counter() for cgram, ccount in cgramcounter.items(): _UpperCAmelCase = ccount * numref # KEEP _UpperCAmelCase = sgramcounter_rep & cgramcounter_rep _UpperCAmelCase = keepgramcounter_rep & rgramcounter _UpperCAmelCase = sgramcounter_rep & rgramcounter _UpperCAmelCase = 0 
_UpperCAmelCase = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. _UpperCAmelCase = 1 _UpperCAmelCase = 1 if len(A__ ) > 0: _UpperCAmelCase = keeptmpscorea / len(A__ ) if len(A__ ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) _UpperCAmelCase = keeptmpscorea / sum(keepgramcounterall_rep.values() ) _UpperCAmelCase = 0 if keepscore_precision > 0 or keepscore_recall > 0: _UpperCAmelCase = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION _UpperCAmelCase = sgramcounter_rep - cgramcounter_rep _UpperCAmelCase = delgramcounter_rep - rgramcounter _UpperCAmelCase = sgramcounter_rep - rgramcounter _UpperCAmelCase = 0 _UpperCAmelCase = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. _UpperCAmelCase = 1 if len(A__ ) > 0: _UpperCAmelCase = deltmpscorea / len(A__ ) # ADDITION _UpperCAmelCase = set(A__ ) - set(A__ ) _UpperCAmelCase = set(A__ ) & set(A__ ) _UpperCAmelCase = set(A__ ) - set(A__ ) _UpperCAmelCase = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
_UpperCAmelCase = 1 _UpperCAmelCase = 1 if len(A__ ) > 0: _UpperCAmelCase = addtmpscore / len(A__ ) if len(A__ ) > 0: _UpperCAmelCase = addtmpscore / len(A__ ) _UpperCAmelCase = 0 if addscore_precision > 0 or addscore_recall > 0: _UpperCAmelCase = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def A__ ( A__ , A__ , A__ ) -> Any: '''simple docstring''' _UpperCAmelCase = len(A__ ) _UpperCAmelCase = ssent.split(" " ) _UpperCAmelCase = csent.split(" " ) _UpperCAmelCase = [] _UpperCAmelCase = [] _UpperCAmelCase = [] _UpperCAmelCase = [] _UpperCAmelCase = [] _UpperCAmelCase = [] _UpperCAmelCase = [] _UpperCAmelCase = [] _UpperCAmelCase = [] _UpperCAmelCase = [] for rsent in rsents: _UpperCAmelCase = rsent.split(" " ) _UpperCAmelCase = [] _UpperCAmelCase = [] _UpperCAmelCase = [] ragramslist.append(A__ ) for i in range(0 , len(A__ ) - 1 ): if i < len(A__ ) - 1: _UpperCAmelCase = ragrams[i] + " " + ragrams[i + 1] ragrams.append(A__ ) if i < len(A__ ) - 2: _UpperCAmelCase = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] ragrams.append(A__ ) if i < len(A__ ) - 3: _UpperCAmelCase = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] + " " + ragrams[i + 3] ragrams.append(A__ ) ragramslist.append(A__ ) ragramslist.append(A__ ) ragramslist.append(A__ ) for i in range(0 , len(A__ ) - 1 ): if i < len(A__ ) - 1: _UpperCAmelCase = sagrams[i] + " " + sagrams[i + 1] sagrams.append(A__ ) if i < len(A__ ) - 2: _UpperCAmelCase = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] sagrams.append(A__ ) if i < len(A__ ) - 3: _UpperCAmelCase = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] + " " + sagrams[i + 3] sagrams.append(A__ ) for i in range(0 , len(A__ ) - 1 ): if i < len(A__ ) - 1: _UpperCAmelCase = cagrams[i] + " " + cagrams[i + 1] cagrams.append(A__ ) if i < len(A__ ) - 2: _UpperCAmelCase = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] cagrams.append(A__ ) if i < len(A__ ) - 3: _UpperCAmelCase = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] + " " + cagrams[i + 3] cagrams.append(A__ ) ((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) = SARIngram(A__ , A__ , A__ , A__ ) ((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) = SARIngram(A__ , A__ , A__ , A__ ) ((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) = SARIngram(A__ , A__ , A__ , A__ ) ((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) = SARIngram(A__ , A__ , A__ , A__ ) _UpperCAmelCase = sum([keepascore, keepascore, keepascore, keepascore] ) / 4 _UpperCAmelCase = sum([delascore, delascore, delascore, delascore] ) / 4 _UpperCAmelCase = sum([addascore, addascore, addascore, addascore] ) / 4 _UpperCAmelCase = (avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def A__ ( A__ , A__ = True , A__ = "13a" , A__ = True ) -> Any: '''simple docstring''' if lowercase: _UpperCAmelCase = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: _UpperCAmelCase = sacrebleu.metrics.bleu._get_tokenizer(A__ )()(A__ ) else: _UpperCAmelCase = sacrebleu.TOKENIZERS[tokenizer]()(A__ ) elif tokenizer == "moses": _UpperCAmelCase = sacremoses.MosesTokenizer().tokenize(A__ , return_str=A__ , escape=A__ ) elif tokenizer == "penn": _UpperCAmelCase = sacremoses.MosesTokenizer().penn_tokenize(A__ , return_str=A__ ) else: _UpperCAmelCase = sentence if not return_str: _UpperCAmelCase = normalized_sent.split() return normalized_sent def A__ ( A__ , A__ , A__ 
) -> Optional[Any]: '''simple docstring''' if not (len(A__ ) == len(A__ ) == len(A__ )): raise ValueError("Sources length must match predictions and references lengths." ) _UpperCAmelCase = 0 for src, pred, refs in zip(A__ , A__ , A__ ): sari_score += SARIsent(normalize(A__ ) , normalize(A__ ) , [normalize(A__ ) for sent in refs] ) _UpperCAmelCase = sari_score / len(A__ ) return 100 * sari_score def A__ ( A__ , A__ , A__="exp" , A__=None , A__=False , A__=False , A__=False , ) -> List[str]: '''simple docstring''' _UpperCAmelCase = len(references[0] ) if any(len(A__ ) != references_per_prediction for refs in references ): raise ValueError("Sacrebleu requires the same number of references for each prediction" ) _UpperCAmelCase = [[refs[i] for refs in references] for i in range(A__ )] _UpperCAmelCase = sacrebleu.corpus_bleu( A__ , A__ , smooth_method=A__ , smooth_value=A__ , force=A__ , lowercase=A__ , use_effective_order=A__ , ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class a ( datasets.Metric ): """simple docstring""" def __A ( self ) -> Optional[int]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=[ "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py", "https://github.com/cocoxu/simplification/blob/master/SARI.py", "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py", "https://github.com/mjpost/sacreBLEU", ] , reference_urls=[ "https://www.aclweb.org/anthology/Q16-1029.pdf", "https://github.com/mjpost/sacreBLEU", "https://en.wikipedia.org/wiki/BLEU", "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213", ] , ) def __A ( self , snake_case_ , snake_case_ , snake_case_ ) -> int: _UpperCAmelCase = {} result.update({"sari": compute_sari(sources=snake_case_ , predictions=snake_case_ , references=snake_case_ )} ) result.update({"sacrebleu": compute_sacrebleu(predictions=snake_case_ , references=snake_case_ )} ) result.update({"exact": compute_em(predictions=snake_case_ , references=snake_case_ )} ) return result
579
"""simple docstring""" import unittest from transformers import AutoTokenizer, NystromformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, NystromformerModel, ) from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST class a : """simple docstring""" def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ) -> Optional[Any]: _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = scope def __A ( self ) -> int: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __A ( self ) -> List[str]: return NystromformerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case_ , initializer_range=self.initializer_range , ) def __A ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]: _UpperCAmelCase = NystromformerModel(config=snake_case_ ) model.to(snake_case_ ) 
model.eval() _UpperCAmelCase = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ ) _UpperCAmelCase = model(snake_case_ , token_type_ids=snake_case_ ) _UpperCAmelCase = model(snake_case_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Union[str, Any]: _UpperCAmelCase = NystromformerForMaskedLM(config=snake_case_ ) model.to(snake_case_ ) model.eval() _UpperCAmelCase = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Union[str, Any]: _UpperCAmelCase = NystromformerForQuestionAnswering(config=snake_case_ ) model.to(snake_case_ ) model.eval() _UpperCAmelCase = model( snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __A ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]: _UpperCAmelCase = self.num_labels _UpperCAmelCase = NystromformerForSequenceClassification(snake_case_ ) model.to(snake_case_ ) model.eval() _UpperCAmelCase = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __A ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> str: _UpperCAmelCase = self.num_labels _UpperCAmelCase = NystromformerForTokenClassification(config=snake_case_ ) model.to(snake_case_ ) model.eval() _UpperCAmelCase = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __A ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Dict: _UpperCAmelCase = self.num_choices _UpperCAmelCase = NystromformerForMultipleChoice(config=snake_case_ ) model.to(snake_case_ ) model.eval() _UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = model( snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __A ( self ) -> Optional[int]: _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class a ( _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, unittest.TestCase ): 
"""simple docstring""" A__ : List[Any] = ( ( NystromformerModel, NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, ) if is_torch_available() else () ) A__ : Dict = ( { "feature-extraction": NystromformerModel, "fill-mask": NystromformerForMaskedLM, "question-answering": NystromformerForQuestionAnswering, "text-classification": NystromformerForSequenceClassification, "token-classification": NystromformerForTokenClassification, "zero-shot": NystromformerForSequenceClassification, } if is_torch_available() else {} ) A__ : str = False A__ : Union[str, Any] = False def __A ( self ) -> Dict: _UpperCAmelCase = NystromformerModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=snake_case_ , hidden_size=37 ) def __A ( self ) -> int: self.config_tester.run_common_tests() def __A ( self ) -> Any: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case_ ) def __A ( self ) -> str: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _UpperCAmelCase = type self.model_tester.create_and_check_model(*snake_case_ ) def __A ( self ) -> List[str]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case_ ) def __A ( self ) -> List[Any]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*snake_case_ ) def __A ( self ) -> Optional[int]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*snake_case_ ) def __A ( self ) -> List[str]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*snake_case_ ) def __A ( self ) -> Dict: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*snake_case_ ) @slow def __A ( self ) -> Dict: for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = NystromformerModel.from_pretrained(snake_case_ ) self.assertIsNotNone(snake_case_ ) @require_torch class a ( unittest.TestCase ): """simple docstring""" @slow def __A ( self ) -> Any: _UpperCAmelCase = NystromformerModel.from_pretrained("uw-madison/nystromformer-512" ) _UpperCAmelCase = torch.tensor([[0, 1, 2, 3, 4, 5]] ) with torch.no_grad(): _UpperCAmelCase = model(snake_case_ )[0] _UpperCAmelCase = torch.Size((1, 6, 768) ) self.assertEqual(output.shape , snake_case_ ) _UpperCAmelCase = torch.tensor( [[[-0.45_32, -0.09_36, 0.51_37], [-0.26_76, 0.06_28, 0.61_86], [-0.36_29, -0.17_26, 0.47_16]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case_ , atol=1e-4 ) ) @slow def __A ( self ) -> Any: _UpperCAmelCase = "the [MASK] of Belgium is Brussels" _UpperCAmelCase = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512" ) _UpperCAmelCase = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512" ) _UpperCAmelCase = tokenizer(snake_case_ , return_tensors="pt" ) with torch.no_grad(): _UpperCAmelCase = model(encoding.input_ids ).logits _UpperCAmelCase = token_logits[:, 2, :].argmax(-1 )[0] self.assertEqual(tokenizer.decode(snake_case_ ) , "capital" )
579
1
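The fill-mask check at the end of the sample above boils down to an argmax over the logits at the masked position; a minimal sketch with an invented four-token vocabulary (names and values here are illustrative only):

import torch

vocab = ["the", "capital", "city", "country"]
token_logits = torch.tensor(
    [[[0.1, 0.0, 0.2, 0.0],   # position 0
      [0.0, 0.1, 0.0, 0.3],   # position 1
      [0.2, 0.9, 0.1, 0.0]]]  # position 2 == the [MASK] slot
)
# Same indexing as the integration test: logits at the masked position, argmax id.
predicted_id = token_logits[:, 2, :].argmax(-1)[0].item()
assert vocab[predicted_id] == "capital"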
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPanoramaPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class __lowerCAmelCase ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, unittest.TestCase ): lowerCamelCase_ : int = StableDiffusionPanoramaPipeline lowerCamelCase_ : List[str] = TEXT_TO_IMAGE_PARAMS lowerCamelCase_ : int = TEXT_TO_IMAGE_BATCH_PARAMS lowerCamelCase_ : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS lowerCamelCase_ : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' torch.manual_seed(0 ) snake_case_ : List[str] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) snake_case_ : Tuple = DDIMScheduler() torch.manual_seed(0 ) snake_case_ : List[str] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) snake_case_ : Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) snake_case_ : Optional[int] = CLIPTextModel(snake_case__ ) snake_case_ : Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) snake_case_ : Tuple = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def lowerCamelCase (self , __magic_name__ , __magic_name__=0 ) -> str: '''simple docstring''' snake_case_ : Tuple = torch.manual_seed(snake_case__ ) snake_case_ : List[Any] = { '''prompt''': '''a photo of the dolomites''', '''generator''': generator, # Setting height and width to None to prevent OOMs on CPU. 
'''height''': None, '''width''': None, '''num_inference_steps''': 1, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' snake_case_ : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator snake_case_ : Dict = self.get_dummy_components() snake_case_ : Tuple = StableDiffusionPanoramaPipeline(**snake_case__ ) snake_case_ : int = sd_pipe.to(snake_case__ ) sd_pipe.set_progress_bar_config(disable=snake_case__ ) snake_case_ : Union[str, Any] = self.get_dummy_inputs(snake_case__ ) snake_case_ : int = sd_pipe(**snake_case__ ).images snake_case_ : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) snake_case_ : List[str] = np.array([0.6_186, 0.5_374, 0.4_915, 0.4_135, 0.4_114, 0.4_563, 0.5_128, 0.4_977, 0.4_757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowerCamelCase (self ) -> Union[str, Any]: '''simple docstring''' super().test_inference_batch_consistent(batch_sizes=[1, 2] ) def lowerCamelCase (self ) -> int: '''simple docstring''' super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25e-3 ) def lowerCamelCase (self ) -> Any: '''simple docstring''' snake_case_ : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator snake_case_ : int = self.get_dummy_components() snake_case_ : Optional[Any] = StableDiffusionPanoramaPipeline(**snake_case__ ) snake_case_ : Union[str, Any] = sd_pipe.to(snake_case__ ) sd_pipe.set_progress_bar_config(disable=snake_case__ ) snake_case_ : List[Any] = self.get_dummy_inputs(snake_case__ ) snake_case_ : List[Any] = '''french fries''' snake_case_ : Union[str, Any] = sd_pipe(**snake_case__ , negative_prompt=snake_case__ ) snake_case_ : Optional[Any] = output.images snake_case_ : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) snake_case_ : Dict = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowerCamelCase (self ) -> int: '''simple docstring''' snake_case_ : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator snake_case_ : Union[str, Any] = self.get_dummy_components() snake_case_ : Optional[Any] = StableDiffusionPanoramaPipeline(**snake_case__ ) snake_case_ : Dict = sd_pipe.to(snake_case__ ) sd_pipe.set_progress_bar_config(disable=snake_case__ ) snake_case_ : Optional[Any] = self.get_dummy_inputs(snake_case__ ) snake_case_ : Optional[Any] = sd_pipe(**snake_case__ , view_batch_size=2 ) snake_case_ : str = output.images snake_case_ : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) snake_case_ : Tuple = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' snake_case_ : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator snake_case_ : List[str] = self.get_dummy_components() snake_case_ : List[Any] = EulerAncestralDiscreteScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' ) snake_case_ : Tuple = StableDiffusionPanoramaPipeline(**snake_case__ ) snake_case_ : Tuple = sd_pipe.to(snake_case__ ) sd_pipe.set_progress_bar_config(disable=snake_case__ ) snake_case_ : Any = self.get_dummy_inputs(snake_case__ ) snake_case_ : List[Any] = 
sd_pipe(**snake_case__ ).images snake_case_ : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) snake_case_ : List[str] = np.array([0.4_024, 0.6_510, 0.4_901, 0.5_378, 0.5_813, 0.5_622, 0.4_795, 0.4_467, 0.4_952] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowerCamelCase (self ) -> str: '''simple docstring''' snake_case_ : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator snake_case_ : List[Any] = self.get_dummy_components() snake_case_ : Optional[int] = PNDMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , skip_prk_steps=snake_case__ ) snake_case_ : Union[str, Any] = StableDiffusionPanoramaPipeline(**snake_case__ ) snake_case_ : Union[str, Any] = sd_pipe.to(snake_case__ ) sd_pipe.set_progress_bar_config(disable=snake_case__ ) snake_case_ : str = self.get_dummy_inputs(snake_case__ ) snake_case_ : str = sd_pipe(**snake_case__ ).images snake_case_ : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) snake_case_ : Any = np.array([0.6_391, 0.6_291, 0.4_861, 0.5_134, 0.5_552, 0.4_578, 0.5_032, 0.5_023, 0.4_539] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase (self ) -> Optional[int]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase (self , __magic_name__=0 ) -> Tuple: '''simple docstring''' snake_case_ : Tuple = torch.manual_seed(snake_case__ ) snake_case_ : List[str] = { '''prompt''': '''a photo of the dolomites''', '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def lowerCamelCase (self ) -> List[str]: '''simple docstring''' snake_case_ : List[str] = '''stabilityai/stable-diffusion-2-base''' snake_case_ : str = DDIMScheduler.from_pretrained(snake_case__ , subfolder='''scheduler''' ) snake_case_ : Tuple = StableDiffusionPanoramaPipeline.from_pretrained(snake_case__ , scheduler=snake_case__ , safety_checker=snake_case__ ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) pipe.enable_attention_slicing() snake_case_ : Optional[Any] = self.get_inputs() snake_case_ : Dict = pipe(**snake_case__ ).images snake_case_ : Any = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) snake_case_ : Any = np.array( [ 0.36_968_392, 0.27_025_372, 0.32_446_766, 0.28_379_387, 0.36_363_274, 0.30_733_347, 0.27_100_027, 0.27_054_125, 0.25_536_096, ] ) assert np.abs(expected_slice - image_slice ).max() < 1e-2 def lowerCamelCase (self ) -> Tuple: '''simple docstring''' snake_case_ : Any = StableDiffusionPanoramaPipeline.from_pretrained( '''stabilityai/stable-diffusion-2-base''' , safety_checker=snake_case__ ) snake_case_ : Optional[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) pipe.enable_attention_slicing() snake_case_ : Any = self.get_inputs() snake_case_ : Any = pipe(**snake_case__ ).images snake_case_ : Tuple = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) snake_case_ : Any = np.array( [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ] ] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def lowerCamelCase (self ) -> str: '''simple docstring''' snake_case_ : str = 0 def callback_fn(__magic_name__ , __magic_name__ , __magic_name__ ) -> None: snake_case_ : 
Optional[int] = True nonlocal number_of_steps number_of_steps += 1 if step == 1: snake_case_ : Union[str, Any] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) snake_case_ : Dict = latents[0, -3:, -3:, -1] snake_case_ : int = np.array( [ 0.18_681_869, 0.33_907_816, 0.5_361_276, 0.14_432_865, -0.02_856_611, -0.73_941_123, 0.23_397_987, 0.47_322_682, -0.37_823_164, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 elif step == 2: snake_case_ : Union[str, Any] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) snake_case_ : Optional[Any] = latents[0, -3:, -3:, -1] snake_case_ : int = np.array( [ 0.18_539_645, 0.33_987_248, 0.5_378_559, 0.14_437_142, -0.02_455_261, -0.7_338_317, 0.23_990_755, 0.47_356_272, -0.3_786_505, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 snake_case_ : str = False snake_case_ : int = '''stabilityai/stable-diffusion-2-base''' snake_case_ : Optional[int] = DDIMScheduler.from_pretrained(snake_case__ , subfolder='''scheduler''' ) snake_case_ : str = StableDiffusionPanoramaPipeline.from_pretrained(snake_case__ , scheduler=snake_case__ , safety_checker=snake_case__ ) snake_case_ : Dict = pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) pipe.enable_attention_slicing() snake_case_ : str = self.get_inputs() pipe(**snake_case__ , callback=snake_case__ , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def lowerCamelCase (self ) -> Any: '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() snake_case_ : Optional[Any] = '''stabilityai/stable-diffusion-2-base''' snake_case_ : Tuple = DDIMScheduler.from_pretrained(snake_case__ , subfolder='''scheduler''' ) snake_case_ : List[Any] = StableDiffusionPanoramaPipeline.from_pretrained(snake_case__ , scheduler=snake_case__ , safety_checker=snake_case__ ) snake_case_ : List[str] = pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() snake_case_ : List[str] = self.get_inputs() snake_case_ : Optional[Any] = pipe(**snake_case__ ) snake_case_ : Any = torch.cuda.max_memory_allocated() # make sure that less than 5.2 GB is allocated assert mem_bytes < 5.5 * 10**9
60
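The assertions in the pipeline tests above all follow the same pinned-slice pattern: compare a small corner of the output image against hard-coded reference values. A toy version with stand-in data (the array values are invented, not from any real pipeline run):

import numpy as np

image = np.full((1, 64, 64, 3), 0.5, dtype=np.float32)  # stand-in for pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1]                     # bottom-right 3x3 of the last channel
expected_slice = np.full(9, 0.5, dtype=np.float32)       # reference values pinned in the test
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2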
import argparse import math import os from copy import deepcopy import torch from audio_diffusion.models import DiffusionAttnUnetaD from diffusion import sampling from torch import nn from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel A = { 'gwf-440k': { 'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt', 'sample_rate': 4_8_0_0_0, 'sample_size': 6_5_5_3_6, }, 'jmann-small-190k': { 'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt', 'sample_rate': 4_8_0_0_0, 'sample_size': 6_5_5_3_6, }, 'jmann-large-580k': { 'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt', 'sample_rate': 4_8_0_0_0, 'sample_size': 1_3_1_0_7_2, }, 'maestro-uncond-150k': { 'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt', 'sample_rate': 1_6_0_0_0, 'sample_size': 6_5_5_3_6, }, 'unlocked-uncond-250k': { 'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt', 'sample_rate': 1_6_0_0_0, 'sample_size': 6_5_5_3_6, }, 'honk-140k': { 'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt', 'sample_rate': 1_6_0_0_0, 'sample_size': 6_5_5_3_6, }, } def lowerCamelCase ( UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] ) -> Optional[Any]: return torch.atana(UpperCamelCase , UpperCamelCase ) / math.pi * 2 def lowerCamelCase ( UpperCamelCase : str ) -> Union[str, Any]: _lowerCamelCase = torch.sin(t * math.pi / 2 ) ** 2 _lowerCamelCase = (1 - sigma**2) ** 0.5 return alpha_sigma_to_t(UpperCamelCase , UpperCamelCase ) class lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' pass class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : Dict , snake_case__ : Any ) -> Optional[Any]: super().__init__() _lowerCamelCase = DiffusionAttnUnetaD(snake_case__ , n_attn_layers=4 ) _lowerCamelCase = deepcopy(self.diffusion ) _lowerCamelCase = torch.quasirandom.SobolEngine(1 , scramble=snake_case__ ) def lowerCamelCase ( UpperCamelCase : List[Any] ) -> List[str]: _lowerCamelCase = MODELS_MAP[model_name]['url'] os.system(F"""wget {url} ./""" ) return F"""./{model_name}.ckpt""" A = { '1': 'resnets.0', '2': 'attentions.0', '3': 'resnets.1', '4': 'attentions.1', '5': 'resnets.2', '6': 'attentions.2', } A = { '8': 'resnets.0', '9': 'attentions.0', '10': 'resnets.1', '11': 'attentions.1', '12': 'resnets.2', '13': 'attentions.2', } A = { '1': 'resnets.0', '2': 'attentions.0', '3': 'resnets.1', '4': 'attentions.1', '5': 'resnets.2', '6': 'attentions.2', '8': 'resnets.3', '9': 'attentions.3', '10': 'resnets.4', '11': 'attentions.4', '12': 'resnets.5', '13': 'attentions.5', } A = { '0': 'resnets.0', '1': 'resnets.1', '2': 'resnets.2', '4': 'resnets.0', '5': 'resnets.1', '6': 'resnets.2', } A = { 'skip': 'conv_skip', 'main.0': 'conv_1', 'main.1': 'group_norm_1', 'main.3': 'conv_2', 'main.4': 'group_norm_2', } A = { 'norm': 'group_norm', 'qkv_proj': ['query', 'key', 'value'], 'out_proj': ['proj_attn'], } def lowerCamelCase ( UpperCamelCase : Tuple ) -> int: if name.startswith('skip' ): return name.replace('skip' , RES_CONV_MAP['skip'] ) # name has to be of format main.{digit} if not name.startswith('main.' 
): raise ValueError(F"""ResConvBlock error with {name}""" ) return name.replace(name[:6] , RES_CONV_MAP[name[:6]] ) def lowerCamelCase ( UpperCamelCase : Optional[Any] ) -> Tuple: for key, value in ATTN_MAP.items(): if name.startswith(UpperCamelCase ) and not isinstance(UpperCamelCase , UpperCamelCase ): return name.replace(UpperCamelCase , UpperCamelCase ) elif name.startswith(UpperCamelCase ): return [name.replace(UpperCamelCase , UpperCamelCase ) for v in value] raise ValueError(F"""Attn error with {name}""" ) def lowerCamelCase ( UpperCamelCase : Any , UpperCamelCase : int=13 ) -> Optional[int]: _lowerCamelCase = input_string if string.split('.' )[0] == "timestep_embed": return string.replace('timestep_embed' , 'time_proj' ) _lowerCamelCase = 0 if string.startswith('net.3.' ): depth += 1 _lowerCamelCase = string[6:] elif string.startswith('net.' ): _lowerCamelCase = string[4:] while string.startswith('main.7.' ): depth += 1 _lowerCamelCase = string[7:] if string.startswith('main.' ): _lowerCamelCase = string[5:] # mid block if string[:2].isdigit(): _lowerCamelCase = string[:2] _lowerCamelCase = string[2:] else: _lowerCamelCase = string[0] _lowerCamelCase = string[1:] if depth == max_depth: _lowerCamelCase = MID_NUM_TO_LAYER[layer_num] _lowerCamelCase = 'mid_block' elif depth > 0 and int(UpperCamelCase ) < 7: _lowerCamelCase = DOWN_NUM_TO_LAYER[layer_num] _lowerCamelCase = F"""down_blocks.{depth}""" elif depth > 0 and int(UpperCamelCase ) > 7: _lowerCamelCase = UP_NUM_TO_LAYER[layer_num] _lowerCamelCase = F"""up_blocks.{max_depth - depth - 1}""" elif depth == 0: _lowerCamelCase = DEPTH_0_TO_LAYER[layer_num] _lowerCamelCase = F"""up_blocks.{max_depth - 1}""" if int(UpperCamelCase ) > 3 else 'down_blocks.0' if not string_left.startswith('.' ): raise ValueError(F"""Naming error with {input_string} and string_left: {string_left}.""" ) _lowerCamelCase = string_left[1:] if "resnets" in new_layer: _lowerCamelCase = convert_resconv_naming(UpperCamelCase ) elif "attentions" in new_layer: _lowerCamelCase = convert_attn_naming(UpperCamelCase ) _lowerCamelCase = new_string_left if not isinstance(UpperCamelCase , UpperCamelCase ): _lowerCamelCase = prefix + '.' + new_layer + '.' + string_left else: _lowerCamelCase = [prefix + '.' + new_layer + '.' + s for s in string_left] return new_string def lowerCamelCase ( UpperCamelCase : List[Any] ) -> int: _lowerCamelCase = {} for k, v in state_dict.items(): if k.endswith('kernel' ): # up- and downsample layers, don't have trainable weights continue _lowerCamelCase = rename(UpperCamelCase ) # check if we need to transform from Conv => Linear for attention if isinstance(UpperCamelCase , UpperCamelCase ): _lowerCamelCase = transform_conv_attns(UpperCamelCase , UpperCamelCase , UpperCamelCase ) else: _lowerCamelCase = v return new_state_dict def lowerCamelCase ( UpperCamelCase : List[str] , UpperCamelCase : Tuple , UpperCamelCase : Dict ) -> Optional[Any]: if len(UpperCamelCase ) == 1: if len(v.shape ) == 3: # weight _lowerCamelCase = v[:, :, 0] else: # bias _lowerCamelCase = v else: # qkv matrices _lowerCamelCase = v.shape[0] _lowerCamelCase = trippled_shape // 3 for i in range(3 ): if len(v.shape ) == 3: _lowerCamelCase = v[i * single_shape : (i + 1) * single_shape, :, 0] else: _lowerCamelCase = v[i * single_shape : (i + 1) * single_shape] return new_state_dict def lowerCamelCase ( UpperCamelCase : Any ) -> Optional[Any]: _lowerCamelCase = torch.device('cuda' if torch.cuda.is_available() else 'cpu' ) _lowerCamelCase = args.model_path.split('/' )[-1].split('.' 
)[0] if not os.path.isfile(args.model_path ): assert ( model_name == args.model_path ), F"""Make sure to provide one of the official model names {MODELS_MAP.keys()}""" _lowerCamelCase = download(UpperCamelCase ) _lowerCamelCase = MODELS_MAP[model_name]['sample_rate'] _lowerCamelCase = MODELS_MAP[model_name]['sample_size'] _lowerCamelCase = Object() _lowerCamelCase = sample_size _lowerCamelCase = sample_rate _lowerCamelCase = 0 _lowerCamelCase = UNetaDModel(sample_size=UpperCamelCase , sample_rate=UpperCamelCase ) _lowerCamelCase = diffusers_model.state_dict() _lowerCamelCase = DiffusionUncond(UpperCamelCase ) orig_model.load_state_dict(torch.load(args.model_path , map_location=UpperCamelCase )['state_dict'] ) _lowerCamelCase = orig_model.diffusion_ema.eval() _lowerCamelCase = orig_model.state_dict() _lowerCamelCase = rename_orig_weights(UpperCamelCase ) _lowerCamelCase = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() ) _lowerCamelCase = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() ) assert len(UpperCamelCase ) == 0, F"""Problem with {renamed_minus_diffusers}""" assert all(k.endswith('kernel' ) for k in list(UpperCamelCase ) ), F"""Problem with {diffusers_minus_renamed}""" for key, value in renamed_state_dict.items(): assert ( diffusers_state_dict[key].squeeze().shape == value.squeeze().shape ), F"""Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}""" if key == "time_proj.weight": _lowerCamelCase = value.squeeze() _lowerCamelCase = value diffusers_model.load_state_dict(UpperCamelCase ) _lowerCamelCase = 1_00 _lowerCamelCase = 33 _lowerCamelCase = IPNDMScheduler(num_train_timesteps=UpperCamelCase ) _lowerCamelCase = torch.manual_seed(UpperCamelCase ) _lowerCamelCase = torch.randn([1, 2, config.sample_size] , generator=UpperCamelCase ).to(UpperCamelCase ) _lowerCamelCase = torch.linspace(1 , 0 , steps + 1 , device=UpperCamelCase )[:-1] _lowerCamelCase = get_crash_schedule(UpperCamelCase ) _lowerCamelCase = DanceDiffusionPipeline(unet=UpperCamelCase , scheduler=UpperCamelCase ) _lowerCamelCase = torch.manual_seed(33 ) _lowerCamelCase = pipe(num_inference_steps=UpperCamelCase , generator=UpperCamelCase ).audios _lowerCamelCase = sampling.iplms_sample(UpperCamelCase , UpperCamelCase , UpperCamelCase , {} ) _lowerCamelCase = generated.clamp(-1 , 1 ) _lowerCamelCase = (generated - audio).abs().sum() _lowerCamelCase = (generated - audio).abs().max() if args.save: pipe.save_pretrained(args.checkpoint_path ) print('Diff sum' , UpperCamelCase ) print('Diff max' , UpperCamelCase ) assert diff_max < 1e-3, F"""Diff max: {diff_max} is too much :-/""" print(F"""Conversion for {model_name} successful!""" ) if __name__ == "__main__": A = argparse.ArgumentParser() parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.') parser.add_argument( '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.' ) parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.') A = parser.parse_args() main(args)
544
0
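One step worth isolating from the converter above is the fused-QKV split in transform_conv_attns: a (3*C, C, 1) conv1d weight becomes three (C, C) linear weights. A standalone sketch with a hypothetical channel count:

import torch

c = 4                                    # hypothetical channel count
fused = torch.randn(3 * c, c, 1)         # conv1d qkv weight, kernel size 1
single_shape = fused.shape[0] // 3
query, key, value = (fused[i * single_shape : (i + 1) * single_shape, :, 0] for i in range(3))
assert query.shape == key.shape == value.shape == (c, c)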
'''simple docstring'''


def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    '''simple docstring'''
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
665
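A few doctest-style checks for the two-ended search, under the signature restored above (redefined here so the sketch runs on its own):

def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    # Compare both ends of the window, then shrink it one step from each side.
    right = right or len(list_data) - 1
    if left > right:
        return -1
    if list_data[left] == key:
        return left
    if list_data[right] == key:
        return right
    return search(list_data, key, left + 1, right - 1)


assert search([1, 3, 5, 7, 9, 11], 5) == 2    # found on the third recursive call
assert search([1, 3, 5, 7, 9, 11], 11) == 5   # right end matches immediately
assert search([1, 3, 5, 7, 9, 11], 4) == -1   # absent keys fall through to -1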
'''simple docstring'''
from math import isqrt


def calculate_prime_numbers(max_number: int) -> list:
    '''simple docstring'''
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    '''simple docstring'''
    prime_numbers = calculate_prime_numbers(max_number // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count


if __name__ == "__main__":
    print(F'''{solution() = }''')
665
1
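The two-pointer loop above counts prime pairs (p, q) with p <= q and p * q below the bound; a worked check on a small bound of 30:

primes = [2, 3, 5, 7, 11, 13]  # primes below 30 // 2

count, left, right = 0, 0, len(primes) - 1
while left <= right:
    # Shrink `right` until primes[left] * primes[right] fits under the bound;
    # primes[left] then pairs with every prime from index left through right.
    while left <= right and primes[left] * primes[right] >= 30:
        right -= 1
    count += right - left + 1
    left += 1

# Semiprimes below 30: 4, 6, 9, 10, 14, 15, 21, 22, 25, 26.
assert count == 10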
import shutil import tempfile import unittest from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast from transformers.testing_utils import require_sentencepiece, require_torchaudio from .test_feature_extraction_clap import floats_list @require_torchaudio @require_sentencepiece class _UpperCamelCase ( unittest.TestCase ): """simple docstring""" def _SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: '''simple docstring''' __lowercase = '''laion/clap-htsat-unfused''' __lowercase = tempfile.mkdtemp() def _SCREAMING_SNAKE_CASE ( self , **lowerCAmelCase__ ) -> int: '''simple docstring''' return RobertaTokenizer.from_pretrained(self.checkpoint , **lowercase__ ) def _SCREAMING_SNAKE_CASE ( self , **lowerCAmelCase__ ) -> str: '''simple docstring''' return ClapFeatureExtractor.from_pretrained(self.checkpoint , **lowercase__ ) def _SCREAMING_SNAKE_CASE ( self ) -> str: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _SCREAMING_SNAKE_CASE ( self ) -> Tuple: '''simple docstring''' __lowercase = self.get_tokenizer() __lowercase = self.get_feature_extractor() __lowercase = ClapProcessor(tokenizer=lowercase__ , feature_extractor=lowercase__ ) processor.save_pretrained(self.tmpdirname ) __lowercase = ClapProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , lowercase__ ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , lowercase__ ) def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: '''simple docstring''' __lowercase = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() ) processor.save_pretrained(self.tmpdirname ) __lowercase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __lowercase = self.get_feature_extractor(do_normalize=lowercase__ , padding_value=1.0 ) __lowercase = ClapProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=lowercase__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , lowercase__ ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.feature_extractor , lowercase__ ) def _SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' __lowercase = self.get_feature_extractor() __lowercase = self.get_tokenizer() __lowercase = ClapProcessor(tokenizer=lowercase__ , feature_extractor=lowercase__ ) __lowercase = floats_list((3, 10_00) ) __lowercase = feature_extractor(lowercase__ , return_tensors='''np''' ) __lowercase = processor(audios=lowercase__ , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: '''simple docstring''' __lowercase = self.get_feature_extractor() __lowercase = self.get_tokenizer() __lowercase = ClapProcessor(tokenizer=lowercase__ , feature_extractor=lowercase__ ) __lowercase = '''This is a test string''' __lowercase = processor(text=lowercase__ ) __lowercase = tokenizer(lowercase__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: '''simple 
docstring''' __lowercase = self.get_feature_extractor() __lowercase = self.get_tokenizer() __lowercase = ClapProcessor(tokenizer=lowercase__ , feature_extractor=lowercase__ ) __lowercase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowercase = processor.batch_decode(lowercase__ ) __lowercase = tokenizer.batch_decode(lowercase__ ) self.assertListEqual(lowercase__ , lowercase__ ) def _SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: '''simple docstring''' __lowercase = self.get_feature_extractor() __lowercase = self.get_tokenizer() __lowercase = ClapProcessor(tokenizer=lowercase__ , feature_extractor=lowercase__ ) self.assertListEqual( processor.model_input_names[2:] , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
534
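The save/load round-trip these processor tests rely on reduces to serializing a config and reading it back; a framework-free sketch of the same pattern using plain json (the kwargs are illustrative):

import json
import tempfile
from pathlib import Path

config = {"do_normalize": False, "padding_value": 1.0}  # illustrative overrides
with tempfile.TemporaryDirectory() as tmpdir:
    path = Path(tmpdir) / "preprocessor_config.json"
    path.write_text(json.dumps(config))
    reloaded = json.loads(path.read_text())
assert reloaded == config  # every overridden field survives the round trip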
import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def __init__( self , lowercase__ , lowercase__=7 , lowercase__=3 , lowercase__=1_8 , lowercase__=3_0 , lowercase__=4_0_0 , lowercase__=True , lowercase__=None , lowercase__=True , ): '''simple docstring''' __A =size if size is not None else {'''height''': 1_8, '''width''': 1_8} __A =parent __A =batch_size __A =num_channels __A =image_size __A =min_resolution __A =max_resolution __A =do_resize __A =size __A =apply_ocr def __UpperCamelCase ( self ): '''simple docstring''' return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowercase_ = LayoutLMvaImageProcessor if is_pytesseract_available() else None def __UpperCamelCase ( self ): '''simple docstring''' __A =LayoutLMvaImageProcessingTester(self ) @property def __UpperCamelCase ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __UpperCamelCase ( self ): '''simple docstring''' __A =self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowercase__ , '''do_resize''' ) ) self.assertTrue(hasattr(lowercase__ , '''size''' ) ) self.assertTrue(hasattr(lowercase__ , '''apply_ocr''' ) ) def __UpperCamelCase ( self ): '''simple docstring''' __A =self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 1_8, '''width''': 1_8} ) __A =self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 ) self.assertEqual(image_processor.size , {'''height''': 4_2, '''width''': 4_2} ) def __UpperCamelCase ( self ): '''simple docstring''' pass def __UpperCamelCase ( self ): '''simple docstring''' __A =self.image_processing_class(**self.image_processor_dict ) # create random PIL images __A =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ ) for image in image_inputs: self.assertIsInstance(lowercase__ , Image.Image ) # Test not batched input __A =image_processing(image_inputs[0] , return_tensors='''pt''' ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) self.assertIsInstance(encoding.words , lowercase__ ) self.assertIsInstance(encoding.boxes , lowercase__ ) # Test batched __A =image_processing(lowercase__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def __UpperCamelCase ( self ): '''simple docstring''' __A =self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __A =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ , numpify=lowercase__ ) for image in image_inputs: self.assertIsInstance(lowercase__ , np.ndarray ) # Test not 
batched input __A =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched __A =image_processing(lowercase__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def __UpperCamelCase ( self ): '''simple docstring''' __A =self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __A =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ , torchify=lowercase__ ) for image in image_inputs: self.assertIsInstance(lowercase__ , torch.Tensor ) # Test not batched input __A =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched __A =image_processing(lowercase__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def __UpperCamelCase ( self ): '''simple docstring''' __A =LayoutLMvaImageProcessor() from datasets import load_dataset __A =load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' ) __A =Image.open(ds[0]['''file'''] ).convert('''RGB''' ) __A =image_processing(lowercase__ , return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 __A =[['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', 
'''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231 __A =[[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], 
[7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , lowercase__ ) self.assertListEqual(encoding.boxes , lowercase__ ) # with apply_OCR = False __A =LayoutLMvaImageProcessor(apply_ocr=lowercase__ ) __A =image_processing(lowercase__ , return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
184
0
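The box coordinates in the sample above sit on LayoutLM's 0-1000 grid; a minimal sketch of that normalization (the helper name is ours, for illustration, not the library's API):

def normalize_box(box: list, width: int, height: int) -> list:
    # Rescale pixel coordinates onto the 0-1000 grid LayoutLM-style models expect.
    x0, y0, x1, y1 = box
    return [
        int(1000 * x0 / width),
        int(1000 * y0 / height),
        int(1000 * x1 / width),
        int(1000 * y1 / height),
    ]


assert normalize_box([15, 20, 30, 40], width=150, height=200) == [100, 100, 200, 200]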
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def data_handling(data: dict) -> tuple:
    '''simple docstring'''
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    '''simple docstring'''
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    '''simple docstring'''
    data = fetch_california_housing()
    data_input, target = data_handling(data)
    x_train, x_test, y_train, y_test = train_test_split(
        data_input, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f'''Mean Absolute Error : {mean_absolute_error(y_test, predictions)}''')
    print(f'''Mean Square Error : {mean_squared_error(y_test, predictions)}''')


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
702
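For reference, the two metrics printed above reduce to one-line numpy expressions; a hand check on invented values:

import numpy as np

y_true = np.array([3.0, -0.5, 2.0, 7.0])
y_pred = np.array([2.5, 0.0, 2.0, 8.0])

mae = np.abs(y_true - y_pred).mean()   # mean absolute error
mse = ((y_true - y_pred) ** 2).mean()  # mean squared error
assert np.isclose(mae, 0.5)
assert np.isclose(mse, 0.375)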
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os
import platform

import numpy as np
import psutil
import torch

from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file

from ..utils import is_npu_available, is_xpu_available


def env_command_parser(subparsers=None):
    '''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    '''simple docstring'''
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f'''{pt_version} ({pt_cuda_available})''',
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f'''{psutil.virtual_memory().total / 1_024 ** 3:.2f} GB''',
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f'''- {prop}: {val}''' for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f'''\t{accelerate_config}'''
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main():
    '''simple docstring'''
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
539
0
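The report the command above builds is just a name-to-value dict printed as a copy-pasteable bullet list; a dependency-light sketch of the same pattern:

import platform

info = {
    "Platform": platform.platform(),
    "Python version": platform.python_version(),
}
print("\nCopy-and-paste the text below in your GitHub issue\n")
print("\n".join(f"- {prop}: {val}" for prop, val in info.items()))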
import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self : Dict , __A : Optional[int] , __A : Optional[Any]=1_3 , __A : List[Any]=1_0 , __A : List[str]=3 , __A : List[str]=2 , __A : Tuple=2 , __A : int=True , __A : Dict=True , __A : int=3_2 , __A : Dict=5 , __A : Union[str, Any]=4 , __A : List[Any]=3_7 , __A : Any="gelu" , __A : List[str]=0.1 , __A : Dict=0.1 , __A : Union[str, Any]=1_0 , __A : Union[str, Any]=0.0_2 , __A : int="divided_space_time" , __A : Tuple=None , ): snake_case__ : Union[str, Any] = parent snake_case__ : List[Any] = batch_size snake_case__ : int = image_size snake_case__ : Any = num_channels snake_case__ : List[str] = patch_size snake_case__ : int = num_frames snake_case__ : Optional[int] = is_training snake_case__ : int = use_labels snake_case__ : str = hidden_size snake_case__ : str = num_hidden_layers snake_case__ : List[str] = num_attention_heads snake_case__ : Any = intermediate_size snake_case__ : List[str] = hidden_act snake_case__ : Union[str, Any] = hidden_dropout_prob snake_case__ : Union[str, Any] = attention_probs_dropout_prob snake_case__ : Any = attention_type snake_case__ : Tuple = initializer_range snake_case__ : Union[str, Any] = scope snake_case__ : Tuple = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token snake_case__ : List[Any] = (image_size // patch_size) ** 2 snake_case__ : Dict = (num_frames) * self.num_patches_per_frame + 1 def _lowercase ( self : str ): snake_case__ : int = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) snake_case__ : Optional[Any] = None if self.use_labels: snake_case__ : Dict = ids_tensor([self.batch_size] , self.num_labels ) snake_case__ : Tuple = self.get_config() return config, pixel_values, labels def _lowercase ( self : List[str] ): snake_case__ : Dict = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) snake_case__ : Tuple = self.num_labels return config def _lowercase ( self : Union[str, Any] , __A : Any , __A : Union[str, Any] , __A : Union[str, Any] ): snake_case__ : Any = 
TimesformerModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() snake_case__ : Union[str, Any] = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self : str , __A : Union[str, Any] , __A : Optional[Any] , __A : int ): snake_case__ : Union[str, Any] = TimesformerForVideoClassification(snake_case__ ) model.to(snake_case__ ) model.eval() snake_case__ : str = model(snake_case__ ) # verify the logits shape snake_case__ : Optional[Any] = torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , snake_case__ ) def _lowercase ( self : Tuple ): snake_case__ : List[Any] = self.prepare_config_and_inputs() snake_case__, snake_case__, snake_case__ : Optional[int] = config_and_inputs snake_case__ : int = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" a_ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () a_ = ( {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification} if is_torch_available() else {} ) a_ = False a_ = False a_ = False a_ = False def _lowercase ( self : List[str] ): snake_case__ : Tuple = TimesformerModelTester(self ) snake_case__ : List[str] = ConfigTester( self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=3_7 ) def _lowercase ( self : List[str] , __A : int , __A : Union[str, Any] , __A : List[str]=False ): snake_case__ : int = copy.deepcopy(snake_case__ ) if return_labels: if model_class in get_values(snake_case__ ): snake_case__ : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case__ ) return inputs_dict def _lowercase ( self : Union[str, Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="TimeSformer does not use inputs_embeds" ) def _lowercase ( self : Any ): pass def _lowercase ( self : Optional[Any] ): snake_case__, snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : Any = model_class(snake_case__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) snake_case__ : List[str] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) ) def _lowercase ( self : Optional[Any] ): snake_case__, snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : List[Any] = model_class(snake_case__ ) snake_case__ : List[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case__ : int = [*signature.parameters.keys()] snake_case__ : List[Any] = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case__ ) def _lowercase ( self : Any ): snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def _lowercase ( self : Any ): snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*snake_case__ ) @slow def _lowercase ( self : int ): for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case__ : int = TimesformerModel.from_pretrained(snake_case__ ) 
self.assertIsNotNone(snake_case__ ) def _lowercase ( self : Union[str, Any] ): if not self.has_attentions: pass else: snake_case__, snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ : Any = True for model_class in self.all_model_classes: snake_case__ : int = self.model_tester.seq_length snake_case__ : List[str] = self.model_tester.num_frames snake_case__ : Dict = True snake_case__ : Tuple = False snake_case__ : int = True snake_case__ : Optional[Any] = model_class(snake_case__ ) model.to(snake_case__ ) model.eval() with torch.no_grad(): snake_case__ : Union[str, Any] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) ) snake_case__ : Dict = outputs.attentions self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] snake_case__ : str = True snake_case__ : Optional[Any] = model_class(snake_case__ ) model.to(snake_case__ ) model.eval() with torch.no_grad(): snake_case__ : List[str] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) ) snake_case__ : Dict = outputs.attentions self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) snake_case__ : Any = len(snake_case__ ) # Check attention is always last and order is fine snake_case__ : List[str] = True snake_case__ : Dict = True snake_case__ : Optional[int] = model_class(snake_case__ ) model.to(snake_case__ ) model.eval() with torch.no_grad(): snake_case__ : Dict = model(**self._prepare_for_class(snake_case__ , snake_case__ ) ) self.assertEqual(out_len + 1 , len(snake_case__ ) ) snake_case__ : Optional[int] = outputs.attentions self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def _lowercase ( self : List[Any] ): def check_hidden_states_output(__A : Any , __A : Optional[Any] , __A : Tuple ): snake_case__ : List[str] = model_class(snake_case__ ) model.to(snake_case__ ) model.eval() with torch.no_grad(): snake_case__ : Optional[int] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) ) snake_case__ : Union[str, Any] = outputs.hidden_states snake_case__ : Any = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(snake_case__ ) , snake_case__ ) snake_case__ : int = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) snake_case__, snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : Optional[Any] = True check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case__ : Tuple = True check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ) def SCREAMING_SNAKE_CASE ( ): snake_case__ : int = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video" , 
filename="eating_spaghetti.npy" , repo_type="dataset" ) snake_case__ : Union[str, Any] = np.load(snake_case_ ) return list(snake_case_ ) @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" @cached_property def _lowercase ( self : Any ): # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def _lowercase ( self : int ): snake_case__ : List[Any] = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400" ).to( snake_case__ ) snake_case__ : Dict = self.default_image_processor snake_case__ : int = prepare_video() snake_case__ : Any = image_processor(video[:8] , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): snake_case__ : Union[str, Any] = model(**snake_case__ ) # verify the logits snake_case__ : Union[str, Any] = torch.Size((1, 4_0_0) ) self.assertEqual(outputs.logits.shape , snake_case__ ) snake_case__ : int = torch.tensor([-0.3_0_1_6, -0.7_7_1_3, -0.4_2_0_5] ).to(snake_case__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1e-4 ) )
297
from typing import Any, Dict, Optional import torch import torch.nn.functional as F from torch import nn from ..utils import maybe_allow_in_graph from .activations import get_activation from .attention_processor import Attention from .embeddings import CombinedTimestepLabelEmbeddings @maybe_allow_in_graph class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : Dict , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : Tuple=0.0 , snake_case__ : Optional[int] = None , snake_case__ : str = "geglu" , snake_case__ : Optional[int] = None , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = True , snake_case__ : str = "layer_norm" , snake_case__ : bool = False , ) -> Dict: super().__init__() _lowerCamelCase = only_cross_attention _lowerCamelCase = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero' _lowerCamelCase = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm' if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( f"""`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to""" f""" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.""" ) # Define 3 blocks. Each block has its own normalization layer. # 1. Self-Attn if self.use_ada_layer_norm: _lowerCamelCase = AdaLayerNorm(snake_case__ , snake_case__ ) elif self.use_ada_layer_norm_zero: _lowerCamelCase = AdaLayerNormZero(snake_case__ , snake_case__ ) else: _lowerCamelCase = nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ ) _lowerCamelCase = Attention( query_dim=snake_case__ , heads=snake_case__ , dim_head=snake_case__ , dropout=snake_case__ , bias=snake_case__ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=snake_case__ , ) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. _lowerCamelCase = ( AdaLayerNorm(snake_case__ , snake_case__ ) if self.use_ada_layer_norm else nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ ) ) _lowerCamelCase = Attention( query_dim=snake_case__ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=snake_case__ , dim_head=snake_case__ , dropout=snake_case__ , bias=snake_case__ , upcast_attention=snake_case__ , ) # is self-attn if encoder_hidden_states is none else: _lowerCamelCase = None _lowerCamelCase = None # 3. 
Feed-forward _lowerCamelCase = nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ ) _lowerCamelCase = FeedForward(snake_case__ , dropout=snake_case__ , activation_fn=snake_case__ , final_dropout=snake_case__ ) # let chunk size default to None _lowerCamelCase = None _lowerCamelCase = 0 def _snake_case ( self : str , snake_case__ : Optional[int] , snake_case__ : int ) -> Optional[Any]: # Sets chunk feed-forward _lowerCamelCase = chunk_size _lowerCamelCase = dim def _snake_case ( self : Union[str, Any] , snake_case__ : torch.FloatTensor , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[torch.LongTensor] = None , snake_case__ : Dict[str, Any] = None , snake_case__ : Optional[torch.LongTensor] = None , ) -> List[Any]: # Notice that normalization is always applied before the real computation in the following blocks. # 1. Self-Attention if self.use_ada_layer_norm: _lowerCamelCase = self.norma(snake_case__ , snake_case__ ) elif self.use_ada_layer_norm_zero: _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = self.norma( snake_case__ , snake_case__ , snake_case__ , hidden_dtype=hidden_states.dtype ) else: _lowerCamelCase = self.norma(snake_case__ ) _lowerCamelCase = cross_attention_kwargs if cross_attention_kwargs is not None else {} _lowerCamelCase = self.attna( snake_case__ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=snake_case__ , **snake_case__ , ) if self.use_ada_layer_norm_zero: _lowerCamelCase = gate_msa.unsqueeze(1 ) * attn_output _lowerCamelCase = attn_output + hidden_states # 2. Cross-Attention if self.attna is not None: _lowerCamelCase = ( self.norma(snake_case__ , snake_case__ ) if self.use_ada_layer_norm else self.norma(snake_case__ ) ) _lowerCamelCase = self.attna( snake_case__ , encoder_hidden_states=snake_case__ , attention_mask=snake_case__ , **snake_case__ , ) _lowerCamelCase = attn_output + hidden_states # 3. Feed-forward _lowerCamelCase = self.norma(snake_case__ ) if self.use_ada_layer_norm_zero: _lowerCamelCase = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self._chunk_size is not None: # "feed_forward_chunk_size" can be used to save memory if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0: raise ValueError( f"""`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. 
Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.""" ) _lowerCamelCase = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size _lowerCamelCase = torch.cat( [self.ff(snake_case__ ) for hid_slice in norm_hidden_states.chunk(snake_case__ , dim=self._chunk_dim )] , dim=self._chunk_dim , ) else: _lowerCamelCase = self.ff(snake_case__ ) if self.use_ada_layer_norm_zero: _lowerCamelCase = gate_mlp.unsqueeze(1 ) * ff_output _lowerCamelCase = ff_output + hidden_states return hidden_states class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : Union[str, Any] , snake_case__ : int , snake_case__ : Optional[int] = None , snake_case__ : int = 4 , snake_case__ : float = 0.0 , snake_case__ : str = "geglu" , snake_case__ : bool = False , ) -> Optional[int]: super().__init__() _lowerCamelCase = int(dim * mult ) _lowerCamelCase = dim_out if dim_out is not None else dim if activation_fn == "gelu": _lowerCamelCase = GELU(snake_case__ , snake_case__ ) if activation_fn == "gelu-approximate": _lowerCamelCase = GELU(snake_case__ , snake_case__ , approximate='tanh' ) elif activation_fn == "geglu": _lowerCamelCase = GEGLU(snake_case__ , snake_case__ ) elif activation_fn == "geglu-approximate": _lowerCamelCase = ApproximateGELU(snake_case__ , snake_case__ ) _lowerCamelCase = nn.ModuleList([] ) # project in self.net.append(snake_case__ ) # project dropout self.net.append(nn.Dropout(snake_case__ ) ) # project out self.net.append(nn.Linear(snake_case__ , snake_case__ ) ) # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout if final_dropout: self.net.append(nn.Dropout(snake_case__ ) ) def _snake_case ( self : Tuple , snake_case__ : List[Any] ) -> Optional[int]: for module in self.net: _lowerCamelCase = module(snake_case__ ) return hidden_states class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : str , snake_case__ : int , snake_case__ : int , snake_case__ : str = "none" ) -> Any: super().__init__() _lowerCamelCase = nn.Linear(snake_case__ , snake_case__ ) _lowerCamelCase = approximate def _snake_case ( self : List[Any] , snake_case__ : List[Any] ) -> int: if gate.device.type != "mps": return F.gelu(snake_case__ , approximate=self.approximate ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype ) def _snake_case ( self : Optional[int] , snake_case__ : Union[str, Any] ) -> str: _lowerCamelCase = self.proj(snake_case__ ) _lowerCamelCase = self.gelu(snake_case__ ) return hidden_states class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : Dict , snake_case__ : int , snake_case__ : int ) -> List[Any]: super().__init__() _lowerCamelCase = nn.Linear(snake_case__ , dim_out * 2 ) def _snake_case ( self : Optional[Any] , snake_case__ : Optional[int] ) -> Optional[Any]: if gate.device.type != "mps": return F.gelu(snake_case__ ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype ) def _snake_case ( self : List[str] , snake_case__ : Optional[Any] ) -> Optional[int]: _lowerCamelCase , _lowerCamelCase = self.proj(snake_case__ ).chunk(2 , dim=-1 ) return hidden_states * self.gelu(snake_case__ ) class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : Dict , snake_case__ : int , snake_case__ : int ) -> Tuple: super().__init__() _lowerCamelCase = nn.Linear(snake_case__ , snake_case__ ) def _snake_case ( self : 
Optional[Any] , snake_case__ : List[Any] ) -> Optional[int]: _lowerCamelCase = self.proj(snake_case__ ) return x * torch.sigmoid(1.702 * x ) class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : Any ) -> Dict: super().__init__() _lowerCamelCase = nn.Embedding(snake_case__ , snake_case__ ) _lowerCamelCase = nn.SiLU() _lowerCamelCase = nn.Linear(snake_case__ , embedding_dim * 2 ) _lowerCamelCase = nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ ) def _snake_case ( self : Tuple , snake_case__ : str , snake_case__ : Dict ) -> Optional[int]: _lowerCamelCase = self.linear(self.silu(self.emb(snake_case__ ) ) ) _lowerCamelCase , _lowerCamelCase = torch.chunk(snake_case__ , 2 ) _lowerCamelCase = self.norm(snake_case__ ) * (1 + scale) + shift return x class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : Any , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] ) -> Dict: super().__init__() _lowerCamelCase = CombinedTimestepLabelEmbeddings(snake_case__ , snake_case__ ) _lowerCamelCase = nn.SiLU() _lowerCamelCase = nn.Linear(snake_case__ , 6 * embedding_dim , bias=snake_case__ ) _lowerCamelCase = nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ , eps=1e-6 ) def _snake_case ( self : Dict , snake_case__ : Tuple , snake_case__ : str , snake_case__ : str , snake_case__ : List[Any]=None ) -> int: _lowerCamelCase = self.linear(self.silu(self.emb(snake_case__ , snake_case__ , hidden_dtype=snake_case__ ) ) ) _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = emb.chunk(6 , dim=1 ) _lowerCamelCase = self.norm(snake_case__ ) * (1 + scale_msa[:, None]) + shift_msa[:, None] return x, gate_msa, shift_mlp, scale_mlp, gate_mlp class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : Tuple , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : Optional[str] = None , snake_case__ : float = 1e-5 ) -> int: super().__init__() _lowerCamelCase = num_groups _lowerCamelCase = eps if act_fn is None: _lowerCamelCase = None else: _lowerCamelCase = get_activation(snake_case__ ) _lowerCamelCase = nn.Linear(snake_case__ , out_dim * 2 ) def _snake_case ( self : Union[str, Any] , snake_case__ : int , snake_case__ : Optional[Any] ) -> Union[str, Any]: if self.act: _lowerCamelCase = self.act(snake_case__ ) _lowerCamelCase = self.linear(snake_case__ ) _lowerCamelCase = emb[:, :, None, None] _lowerCamelCase , _lowerCamelCase = emb.chunk(2 , dim=1 ) _lowerCamelCase = F.group_norm(snake_case__ , self.num_groups , eps=self.eps ) _lowerCamelCase = x * (1 + scale) + shift return x
544
0
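The ApproximateGELU module in the attention-block snippet above evaluates x * sigmoid(1.702 * x), the well-known sigmoid approximation of GELU. A minimal sketch comparing it against the exact activation (illustrative only; the tensor and range are arbitrary):

import torch
import torch.nn.functional as F

x = torch.linspace(-3.0, 3.0, steps=101)
exact = F.gelu(x)                      # exact, erf-based GELU
approx = x * torch.sigmoid(1.702 * x)  # sigmoid approximation used above
# the two curves differ by at most a few hundredths over this range
print(torch.max(torch.abs(exact - approx)).item())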
"""simple docstring"""

from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class TrajectoryTransformerConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
394
"""simple docstring"""


def nor_gate(input_1, input_2):
    """simple docstring"""
    # NOR outputs 1 only when both inputs are 0
    return int(input_1 == input_2 == 0)


def main():
    """simple docstring"""
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
394
1
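A quick sanity check of the gate against its full truth table (a sketch; it assumes the `nor_gate` defined in the snippet above):

for a in (0, 1):
    for b in (0, 1):
        # NOR is 1 only when both inputs are 0
        assert nor_gate(a, b) == int(a == b == 0)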
"""simple docstring"""


def count_inversions_bf(arr):
    """Count inversions by brute force in O(n^2)."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions with merge-sort style divide and conquer in O(n log n)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    sorted_p, inversions_p = count_inversions_recursive(p)
    sorted_q, inversions_q = count_inversions_recursive(q)
    sorted_arr, cross_inversions = _count_cross_inversions(sorted_p, sorted_q)

    num_inversions = inversions_p + inversions_q + cross_inversions
    return sorted_arr, num_inversions


def _count_cross_inversions(p, q):
    """Merge two sorted lists, counting pairs (i, j) with p[i] > q[j]."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
41
'''simple docstring''' import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer lowerCamelCase :List[str] = logging.get_logger(__name__) class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Dict = 'AutoTokenizer' __SCREAMING_SNAKE_CASE : Union[str, Any] = ['tokenizer'] __SCREAMING_SNAKE_CASE : Tuple = { 'semantic_prompt': 1, 'coarse_prompt': 2, 'fine_prompt': 2, } def __init__(self , lowercase , lowercase=None ): super().__init__(lowercase ) A_ : Any = speaker_embeddings @classmethod def _a (cls , lowercase , lowercase="speaker_embeddings_path.json" , **lowercase ): if speaker_embeddings_dict_path is not None: A_ : Any = get_file_from_repo( lowercase , lowercase , subfolder=kwargs.pop("""subfolder""" , lowercase ) , cache_dir=kwargs.pop("""cache_dir""" , lowercase ) , force_download=kwargs.pop("""force_download""" , lowercase ) , proxies=kwargs.pop("""proxies""" , lowercase ) , resume_download=kwargs.pop("""resume_download""" , lowercase ) , local_files_only=kwargs.pop("""local_files_only""" , lowercase ) , use_auth_token=kwargs.pop("""use_auth_token""" , lowercase ) , revision=kwargs.pop("""revision""" , lowercase ) , ) if speaker_embeddings_path is None: logger.warning( F'`{os.path.join(lowercase , lowercase )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' ) A_ : str = None else: with open(lowercase ) as speaker_embeddings_json: A_ : List[str] = json.load(lowercase ) else: A_ : str = None A_ : int = AutoTokenizer.from_pretrained(lowercase , **lowercase ) return cls(tokenizer=lowercase , speaker_embeddings=lowercase ) def _a (self , lowercase , lowercase="speaker_embeddings_path.json" , lowercase="speaker_embeddings" , lowercase = False , **lowercase , ): if self.speaker_embeddings is not None: os.makedirs(os.path.join(lowercase , lowercase , """v2""" ) , exist_ok=lowercase ) A_ : Optional[int] = {} A_ : Tuple = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": A_ : Union[str, Any] = self._load_voice_preset(lowercase ) A_ : Tuple = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict["""repo_or_path"""] , lowercase , F'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=lowercase , ) A_ : List[str] = os.path.join(lowercase , F'{prompt_key}_{key}.npy' ) A_ : str = tmp_dict with open(os.path.join(lowercase , lowercase ) , """w""" ) as fp: json.dump(lowercase , lowercase ) super().save_pretrained(lowercase , lowercase , **lowercase ) def _a (self , lowercase = None , **lowercase ): A_ : List[Any] = self.speaker_embeddings[voice_preset] A_ : Optional[Any] = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' 
) A_ : int = get_file_from_repo( self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , lowercase ) , cache_dir=kwargs.pop("""cache_dir""" , lowercase ) , force_download=kwargs.pop("""force_download""" , lowercase ) , proxies=kwargs.pop("""proxies""" , lowercase ) , resume_download=kwargs.pop("""resume_download""" , lowercase ) , local_files_only=kwargs.pop("""local_files_only""" , lowercase ) , use_auth_token=kwargs.pop("""use_auth_token""" , lowercase ) , revision=kwargs.pop("""revision""" , lowercase ) , ) if path is None: raise ValueError( F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' ) A_ : Tuple = np.load(lowercase ) return voice_preset_dict def _a (self , lowercase = None ): for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' ) if not isinstance(voice_preset[key] , np.ndarray ): raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' ) if len(voice_preset[key].shape ) != self.preset_shape[key]: raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' ) def __call__(self , lowercase=None , lowercase=None , lowercase="pt" , lowercase=256 , lowercase=False , lowercase=True , lowercase=False , **lowercase , ): if voice_preset is not None and not isinstance(lowercase , lowercase ): if ( isinstance(lowercase , lowercase ) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): A_ : Optional[int] = self._load_voice_preset(lowercase ) else: if isinstance(lowercase , lowercase ) and not voice_preset.endswith(""".npz""" ): A_ : Optional[int] = voice_preset + """.npz""" A_ : Any = np.load(lowercase ) if voice_preset is not None: self._validate_voice_preset_dict(lowercase , **lowercase ) A_ : Optional[int] = BatchFeature(data=lowercase , tensor_type=lowercase ) A_ : Any = self.tokenizer( lowercase , return_tensors=lowercase , padding="""max_length""" , max_length=lowercase , return_attention_mask=lowercase , return_token_type_ids=lowercase , add_special_tokens=lowercase , **lowercase , ) if voice_preset is not None: A_ : Union[str, Any] = voice_preset return encoded_text
667
0
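One way to gain confidence in the divide-and-conquer counter above is to cross-check it against the brute-force version on random inputs (an illustrative sketch using the two functions from that snippet):

import random

for _ in range(100):
    arr = [random.randint(0, 50) for _ in range(random.randint(0, 30))]
    _, fast = count_inversions_recursive(arr)
    assert fast == count_inversions_bf(arr)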
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast @require_vision class _A( unittest.TestCase ): """simple docstring""" def UpperCAmelCase_ ( self ): __A : Optional[int] = tempfile.mkdtemp() __A : Tuple = BlipImageProcessor() __A : List[Any] = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' ) __A : Dict = BlipaProcessor(_A , _A ) processor.save_pretrained(self.tmpdirname ) def UpperCAmelCase_ ( self , **_A ): return AutoProcessor.from_pretrained(self.tmpdirname , **_A ).tokenizer def UpperCAmelCase_ ( self , **_A ): return AutoProcessor.from_pretrained(self.tmpdirname , **_A ).image_processor def UpperCAmelCase_ ( self ): shutil.rmtree(self.tmpdirname ) def UpperCAmelCase_ ( self ): __A : Any = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] __A : int = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs] return image_inputs def UpperCAmelCase_ ( self ): __A : List[str] = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __A : int = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) __A : List[str] = self.get_image_processor(do_normalize=_A , padding_value=1.0 ) __A : Optional[Any] = BlipaProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_A , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , _A ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _A ) def UpperCAmelCase_ ( self ): __A : Dict = self.get_image_processor() __A : List[Any] = self.get_tokenizer() __A : Dict = BlipaProcessor(tokenizer=_A , image_processor=_A ) __A : List[Any] = self.prepare_image_inputs() __A : Optional[int] = image_processor(_A , return_tensors='np' ) __A : Union[str, Any] = processor(images=_A , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def UpperCAmelCase_ ( self ): __A : str = self.get_image_processor() __A : Union[str, Any] = self.get_tokenizer() __A : Any = BlipaProcessor(tokenizer=_A , image_processor=_A ) __A : str = 'lower newer' __A : Dict = processor(text=_A ) __A : List[Any] = tokenizer(_A , return_token_type_ids=_A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def UpperCAmelCase_ ( self ): __A : Optional[Any] = self.get_image_processor() __A : List[Any] = self.get_tokenizer() __A : Any = BlipaProcessor(tokenizer=_A , image_processor=_A ) __A : str = 'lower newer' __A : Union[str, Any] = self.prepare_image_inputs() __A : int = processor(text=_A , images=_A ) self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] ) # test if it raises when no input is passed with pytest.raises(_A ): processor() def UpperCAmelCase_ ( self ): __A : List[str] = self.get_image_processor() __A : List[Any] = self.get_tokenizer() __A : str = BlipaProcessor(tokenizer=_A , image_processor=_A ) __A : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 
4, 3, 1, 1, 8, 9]] __A : Optional[int] = processor.batch_decode(_A ) __A : int = tokenizer.batch_decode(_A ) self.assertListEqual(_A , _A ) def UpperCAmelCase_ ( self ): __A : int = self.get_image_processor() __A : List[Any] = self.get_tokenizer() __A : Optional[int] = BlipaProcessor(tokenizer=_A , image_processor=_A ) __A : Dict = 'lower newer' __A : Tuple = self.prepare_image_inputs() __A : Any = processor(text=_A , images=_A ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
715
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
77
0
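The UniSpeech `__init__` above defers heavy imports through `_LazyModule`. A simplified stand-in for that pattern (the class here is illustrative, not the transformers implementation):

import importlib


class LazyModule:
    """Import the named module only when an attribute is first accessed."""

    def __init__(self, name):
        self._name = name
        self._module = None

    def __getattr__(self, attr):
        if self._module is None:
            self._module = importlib.import_module(self._name)
        return getattr(self._module, attr)


json = LazyModule("json")    # nothing is imported yet
print(json.dumps({"a": 1}))  # the real import happens on first use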
"""simple docstring""" __lowercase : Union[str, Any] = { """meter""": """m""", """kilometer""": """km""", """megametre""": """Mm""", """gigametre""": """Gm""", """terametre""": """Tm""", """petametre""": """Pm""", """exametre""": """Em""", """zettametre""": """Zm""", """yottametre""": """Ym""", } # Exponent of the factor(meter) __lowercase : Any = { """m""": 0, """km""": 3, """Mm""": 6, """Gm""": 9, """Tm""": 1_2, """Pm""": 1_5, """Em""": 1_8, """Zm""": 2_1, """Ym""": 2_4, } def lowerCamelCase_ ( _lowerCamelCase : float , _lowerCamelCase : str , _lowerCamelCase : str ): lowerCamelCase_ = from_type.lower().strip('''s''' ) lowerCamelCase_ = to_type.lower().strip('''s''' ) lowerCamelCase_ = UNIT_SYMBOL.get(_lowerCamelCase , _lowerCamelCase ) lowerCamelCase_ = UNIT_SYMBOL.get(_lowerCamelCase , _lowerCamelCase ) if from_sanitized not in METRIC_CONVERSION: lowerCamelCase_ = ( F"""Invalid 'from_type' value: {from_type!r}.\n""" F"""Conversion abbreviations are: {", ".join(_lowerCamelCase )}""" ) raise ValueError(_lowerCamelCase ) if to_sanitized not in METRIC_CONVERSION: lowerCamelCase_ = ( F"""Invalid 'to_type' value: {to_type!r}.\n""" F"""Conversion abbreviations are: {", ".join(_lowerCamelCase )}""" ) raise ValueError(_lowerCamelCase ) lowerCamelCase_ = METRIC_CONVERSION[from_sanitized] lowerCamelCase_ = METRIC_CONVERSION[to_sanitized] lowerCamelCase_ = 1 if from_exponent > to_exponent: lowerCamelCase_ = from_exponent - to_exponent else: lowerCamelCase_ = -(to_exponent - from_exponent) return value * pow(1_0 , _lowerCamelCase ) if __name__ == "__main__": from doctest import testmod testmod()
142
"""simple docstring""" import warnings from ...utils import logging from .image_processing_beit import BeitImageProcessor __lowercase : List[str] = logging.get_logger(__name__) class lowerCAmelCase ( a ): """simple docstring""" def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> None: '''simple docstring''' warnings.warn( '''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use BeitImageProcessor instead.''' , UpperCamelCase__ , ) super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
142
1
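A worked example of the exponent arithmetic in the converter above: kilometers sit at exponent 3 and meters at 0, so converting 4 km to m multiplies by 10^(3-0) (this assumes the `length_conversion` defined in that snippet):

print(length_conversion(4, "kilometer", "meter"))  # 4 * 10**3  = 4000.0
print(length_conversion(4, "meter", "kilometer"))  # 4 * 10**-3 = 0.004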
import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ): _SCREAMING_SNAKE_CASE : Optional[int] = args.log_outputs _SCREAMING_SNAKE_CASE : int = "_".join(args.dataset.split("/" ) + [args.config, args.split] ) # load metric _SCREAMING_SNAKE_CASE : List[Any] = load_metric("wer" ) _SCREAMING_SNAKE_CASE : Any = load_metric("cer" ) # compute metrics _SCREAMING_SNAKE_CASE : Any = wer.compute(references=result["target"], predictions=result["prediction"] ) _SCREAMING_SNAKE_CASE : Optional[int] = cer.compute(references=result["target"], predictions=result["prediction"] ) # print & log results _SCREAMING_SNAKE_CASE : int = f"""WER: {wer_result}\nCER: {cer_result}""" print(__lowerCamelCase ) with open(f"""{dataset_id}_eval_results.txt""", "w" ) as f: f.write(__lowerCamelCase ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: _SCREAMING_SNAKE_CASE : str = f"""log_{dataset_id}_predictions.txt""" _SCREAMING_SNAKE_CASE : Optional[int] = f"""log_{dataset_id}_targets.txt""" with open(__lowerCamelCase, "w" ) as p, open(__lowerCamelCase, "w" ) as t: # mapping function to write output def write_to_file(__lowerCamelCase, __lowerCamelCase ): p.write(f"""{i}""" + "\n" ) p.write(batch["prediction"] + "\n" ) t.write(f"""{i}""" + "\n" ) t.write(batch["target"] + "\n" ) result.map(__lowerCamelCase, with_indices=__lowerCamelCase ) def lowerCamelCase__ (__lowerCamelCase ): _SCREAMING_SNAKE_CASE : List[str] = "[,?.!\-\;\:\"“%‘”�—’…–]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training _SCREAMING_SNAKE_CASE : Optional[Any] = re.sub(__lowerCamelCase, "", text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! 
_SCREAMING_SNAKE_CASE : Dict = ["\n\n", "\n", " ", " "] for t in token_sequences_to_ignore: _SCREAMING_SNAKE_CASE : List[str] = " ".join(text.split(__lowerCamelCase ) ) return text def lowerCamelCase__ (__lowerCamelCase ): # load dataset _SCREAMING_SNAKE_CASE : int = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=__lowerCamelCase ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor _SCREAMING_SNAKE_CASE : Union[str, Any] = AutoFeatureExtractor.from_pretrained(args.model_id ) _SCREAMING_SNAKE_CASE : Any = feature_extractor.sampling_rate # resample audio _SCREAMING_SNAKE_CASE : Optional[Any] = dataset.cast_column("audio", Audio(sampling_rate=__lowerCamelCase ) ) # load eval pipeline if args.device is None: _SCREAMING_SNAKE_CASE : Optional[int] = 0 if torch.cuda.is_available() else -1 _SCREAMING_SNAKE_CASE : Optional[Any] = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device ) # map function to decode audio def map_to_pred(__lowerCamelCase ): _SCREAMING_SNAKE_CASE : List[Any] = asr( batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s ) _SCREAMING_SNAKE_CASE : List[Any] = prediction["text"] _SCREAMING_SNAKE_CASE : Optional[int] = normalize_text(batch["sentence"] ) return batch # run inference on all examples _SCREAMING_SNAKE_CASE : Any = dataset.map(__lowerCamelCase, remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(__lowerCamelCase, __lowerCamelCase ) if __name__ == "__main__": UpperCamelCase__ =argparse.ArgumentParser() parser.add_argument( '--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers' ) parser.add_argument( '--dataset', type=str, required=True, help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets', ) parser.add_argument( '--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice' ) parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`') parser.add_argument( '--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.' ) parser.add_argument( '--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.' ) parser.add_argument( '--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.' ) parser.add_argument( '--device', type=int, default=None, help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.', ) UpperCamelCase__ =parser.parse_args() main(args)
381
from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class lowerCAmelCase__( __lowercase ): '''simple docstring''' __snake_case = 42 class lowerCAmelCase__( __lowercase , __lowercase ): '''simple docstring''' @register_to_config def __init__( self , __lowerCamelCase = 3 , __lowerCamelCase = 3 , __lowerCamelCase = ("DownEncoderBlock2D",) , __lowerCamelCase = ("UpDecoderBlock2D",) , __lowerCamelCase = (6_4,) , __lowerCamelCase = 1 , __lowerCamelCase = "silu" , __lowerCamelCase = 3 , __lowerCamelCase = 3_2 , __lowerCamelCase = 2_5_6 , __lowerCamelCase = 3_2 , __lowerCamelCase = None , __lowerCamelCase = 0.1_8215 , __lowerCamelCase = "group" , ) -> List[str]: super().__init__() # pass init params to Encoder _SCREAMING_SNAKE_CASE : Tuple = Encoder( in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , down_block_types=__lowerCamelCase , block_out_channels=__lowerCamelCase , layers_per_block=__lowerCamelCase , act_fn=__lowerCamelCase , norm_num_groups=__lowerCamelCase , double_z=__lowerCamelCase , ) _SCREAMING_SNAKE_CASE : Dict = vq_embed_dim if vq_embed_dim is not None else latent_channels _SCREAMING_SNAKE_CASE : str = nn.Convad(__lowerCamelCase , __lowerCamelCase , 1 ) _SCREAMING_SNAKE_CASE : Tuple = VectorQuantizer(__lowerCamelCase , __lowerCamelCase , beta=0.25 , remap=__lowerCamelCase , sane_index_shape=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : int = nn.Convad(__lowerCamelCase , __lowerCamelCase , 1 ) # pass init params to Decoder _SCREAMING_SNAKE_CASE : Dict = Decoder( in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , up_block_types=__lowerCamelCase , block_out_channels=__lowerCamelCase , layers_per_block=__lowerCamelCase , act_fn=__lowerCamelCase , norm_num_groups=__lowerCamelCase , norm_type=__lowerCamelCase , ) @apply_forward_hook def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = True ) -> VQEncoderOutput: _SCREAMING_SNAKE_CASE : Optional[int] = self.encoder(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Any = self.quant_conv(__lowerCamelCase ) if not return_dict: return (h,) return VQEncoderOutput(latents=__lowerCamelCase ) @apply_forward_hook def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = False , __lowerCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]: # also go through quantization layer if not force_not_quantize: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = self.quantize(__lowerCamelCase ) else: _SCREAMING_SNAKE_CASE : Dict = h _SCREAMING_SNAKE_CASE : str = self.post_quant_conv(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Dict = self.decoder(__lowerCamelCase , quant if self.config.norm_type == "spatial" else None ) if not return_dict: return (dec,) return DecoderOutput(sample=__lowerCamelCase ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]: _SCREAMING_SNAKE_CASE : List[Any] = sample _SCREAMING_SNAKE_CASE : List[str] = self.encode(__lowerCamelCase ).latents _SCREAMING_SNAKE_CASE : List[str] = self.decode(__lowerCamelCase ).sample if not return_dict: return (dec,) return DecoderOutput(sample=__lowerCamelCase )
381
1
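At the heart of the VQModel above, the VectorQuantizer snaps each latent vector to its nearest codebook entry. A toy sketch of that lookup (shapes and names are illustrative, not the diffusers implementation):

import torch

codebook = torch.randn(8, 4)  # 8 codes, 4 dimensions each
z = torch.randn(5, 4)         # 5 latents to quantize

distances = torch.cdist(z, codebook)  # pairwise L2 distances, shape (5, 8)
indices = distances.argmin(dim=1)     # index of the closest code per latent
z_q = codebook[indices]               # quantized latents, shape (5, 4)
print(indices)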
"""simple docstring""" import json import logging import os import sys from pathlib import Path import finetune_rag from transformers.file_utils import is_apex_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, require_ray, require_torch_gpu, require_torch_multi_gpu, ) logging.basicConfig(level=logging.DEBUG) SCREAMING_SNAKE_CASE : int = logging.getLogger() SCREAMING_SNAKE_CASE : List[str] = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class __lowerCamelCase ( __lowercase ): def A__ (self , lowerCamelCase ): '''simple docstring''' os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase ) _lowerCAmelCase = {"""source""": """What is love ?""", """target""": """life"""} _lowerCAmelCase = {"""train""": 12, """val""": 2, """test""": 2} for split in ["train", "test", "val"]: for field in ["source", "target"]: _lowerCAmelCase = """\n""".join([contents[field]] * n_lines[split] ) with open(os.path.join(lowerCamelCase , f"""{split}.{field}""" ) , """w""" ) as f: f.write(lowerCamelCase ) def A__ (self , lowerCamelCase , lowerCamelCase = "pytorch" ): '''simple docstring''' _lowerCAmelCase = self.get_auto_remove_tmp_dir() _lowerCAmelCase = os.path.join(lowerCamelCase , """output""" ) _lowerCAmelCase = os.path.join(lowerCamelCase , """data""" ) self._create_dummy_data(data_dir=lowerCamelCase ) _lowerCAmelCase = f""" --data_dir {data_dir} \ --output_dir {output_dir} \ --model_name_or_path facebook/rag-sequence-base \ --model_type rag_sequence \ --do_train \ --do_predict \ --n_val -1 \ --val_check_interval 1.0 \ --train_batch_size 2 \ --eval_batch_size 1 \ --max_source_length 25 \ --max_target_length 25 \ --val_max_target_length 25 \ --test_max_target_length 25 \ --label_smoothing 0.1 \ --dropout 0.1 \ --attention_dropout 0.1 \ --weight_decay 0.001 \ --adam_epsilon 1e-08 \ --max_grad_norm 0.1 \ --lr_scheduler polynomial \ --learning_rate 3e-04 \ --num_train_epochs 1 \ --warmup_steps 4 \ --gradient_accumulation_steps 1 \ --distributed-port 8787 \ --use_dummy_dataset 1 \ --distributed_retriever {distributed_retriever} \ """.split() if gpus > 0: testargs.append(f"""--gpus={gpus}""" ) if is_apex_available(): testargs.append("""--fp16""" ) else: testargs.append("""--gpus=0""" ) testargs.append("""--distributed_backend=ddp_cpu""" ) testargs.append("""--num_processes=2""" ) _lowerCAmelCase = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs execute_subprocess_async(lowerCamelCase , env=self.get_env() ) _lowerCAmelCase = os.path.join(lowerCamelCase , """metrics.json""" ) with open(lowerCamelCase ) as f: _lowerCAmelCase = json.load(lowerCamelCase ) return result @require_torch_gpu def A__ (self ): '''simple docstring''' _lowerCAmelCase = self._run_finetune(gpus=1 ) self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 ) @require_torch_multi_gpu def A__ (self ): '''simple docstring''' _lowerCAmelCase = self._run_finetune(gpus=2 ) self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 ) @require_torch_gpu @require_ray def A__ (self ): '''simple docstring''' _lowerCAmelCase = self._run_finetune(gpus=1 , distributed_retriever="""ray""" ) self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 ) @require_torch_multi_gpu @require_ray def A__ (self ): '''simple docstring''' _lowerCAmelCase = self._run_finetune(gpus=1 , distributed_retriever="""ray""" ) self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
156
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Optional[int] = { '''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''', # See all CANINE models at https://huggingface.co/models?filter=canine } class __lowerCamelCase ( __lowercase ): __UpperCamelCase = 'canine' def __init__(self , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3_072 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=16_384 , lowerCamelCase=16 , lowerCamelCase=0.02 , lowerCamelCase=1e-12 , lowerCamelCase=0 , lowerCamelCase=0Xe_0_0_0 , lowerCamelCase=0Xe_0_0_1 , lowerCamelCase=4 , lowerCamelCase=4 , lowerCamelCase=8 , lowerCamelCase=16_384 , lowerCamelCase=128 , **lowerCamelCase , ): '''simple docstring''' super().__init__(pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase ) _lowerCAmelCase = max_position_embeddings _lowerCAmelCase = hidden_size _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_act _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = initializer_range _lowerCAmelCase = type_vocab_size _lowerCAmelCase = layer_norm_eps # Character config: _lowerCAmelCase = downsampling_rate _lowerCAmelCase = upsampling_kernel_size _lowerCAmelCase = num_hash_functions _lowerCAmelCase = num_hash_buckets _lowerCAmelCase = local_transformer_stride
156
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
706
import doctest
from collections import deque

import numpy as np


class CircularConvolution:
    """Compute the circular convolution of two 1-D signals."""

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for _ in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        # each row of the matrix is the second signal rotated by i positions
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(value, 2) for value in final_signal]


if __name__ == "__main__":
    doctest.testmod()
472
0
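The matrix construction in the CircularConvolution class above is O(n^2); the convolution theorem gives the same result via FFTs. A sketch on that class's default signals:

import numpy as np

a = np.array([2, 1, 2, -1], dtype=float)
b = np.array([1, 2, 3, 4], dtype=float)

# circular convolution = inverse FFT of the product of the FFTs
out = np.real(np.fft.ifft(np.fft.fft(a) * np.fft.fft(b)))
print(np.round(out, 2))  # [10. 10.  6. 14.], matching the rotation-matrix result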
'''simple docstring''' import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger UpperCAmelCase_ : Tuple = '''<<<<<<< This should probably be modified because it mentions: ''' UpperCAmelCase_ : Optional[int] = '''======= >>>>>>> ''' UpperCAmelCase_ : Tuple = [ '''TextEncoderConfig''', '''ByteTextEncoder''', '''SubwordTextEncoder''', '''encoder_config''', '''maybe_build_from_corpus''', '''manual_dir''', ] UpperCAmelCase_ : Optional[int] = [ # (pattern, replacement) # Order is important here for some replacements (R'''tfds\.core''', R'''datasets'''), (R'''tf\.io\.gfile\.GFile''', R'''open'''), (R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''), (R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''), (R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''), (R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''), (R'''tfds\.features\.FeaturesDict\(''', R'''dict('''), (R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''), (R'''tfds\.''', R'''datasets.'''), (R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''), (R'''self\.builder_config''', R'''self.config'''), ] def _UpperCamelCase (_lowerCamelCase : Namespace )-> int: '''simple docstring''' return ConvertCommand(args.tfds_path , args.datasets_directory ) class lowerCAmelCase ( __lowerCAmelCase): @staticmethod def lowerCAmelCase ( __SCREAMING_SNAKE_CASE ) -> Optional[Any]: '''simple docstring''' __snake_case = parser.add_parser( '''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , ) train_parser.add_argument( '''--tfds_path''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , ) train_parser.add_argument( '''--datasets_directory''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help='''Path to the HuggingFace Datasets folder.''' ) train_parser.set_defaults(func=__SCREAMING_SNAKE_CASE ) def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ) -> Dict: '''simple docstring''' __snake_case = get_logger('''datasets-cli/converting''' ) __snake_case = tfds_path __snake_case = datasets_directory def lowerCAmelCase ( self ) -> int: '''simple docstring''' if os.path.isdir(self._tfds_path ): __snake_case = os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): __snake_case = os.path.dirname(self._tfds_path ) else: raise ValueError('''--tfds_path is neither a directory nor a file. 
Please check path.''' ) __snake_case = os.path.abspath(self._datasets_directory ) self._logger.info(F'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' ) __snake_case = [] __snake_case = [] __snake_case = {} if os.path.isdir(self._tfds_path ): __snake_case = os.listdir(__SCREAMING_SNAKE_CASE ) else: __snake_case = [os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(F'''Looking at file {f_name}''' ) __snake_case = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __snake_case = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if not os.path.isfile(__SCREAMING_SNAKE_CASE ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info('''Skipping file''' ) continue with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as f: __snake_case = f.readlines() __snake_case = [] __snake_case = False __snake_case = False __snake_case = [] for line in lines: __snake_case = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: __snake_case = '''import datasets\n''' elif "import tensorflow" in out_line: # order is important here __snake_case = '''''' continue elif "from absl import logging" in out_line: __snake_case = '''from datasets import logging\n''' elif "getLogger" in out_line: __snake_case = out_line.replace('''getLogger''' , '''get_logger''' ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): __snake_case = True __snake_case = list(filter(lambda __SCREAMING_SNAKE_CASE : e in out_line , __SCREAMING_SNAKE_CASE ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(__SCREAMING_SNAKE_CASE ) + '''\n''' ) out_lines.append(__SCREAMING_SNAKE_CASE ) out_lines.append(__SCREAMING_SNAKE_CASE ) continue else: for pattern, replacement in TO_CONVERT: __snake_case = re.sub(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: __snake_case = re.match(r'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , __SCREAMING_SNAKE_CASE ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) ) __snake_case = '''from . import ''' + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." 
in out_line or "tensorflow_datasets" in out_line: raise ValueError(F'''Error converting {out_line.strip()}''' ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: __snake_case = True out_lines.append(__SCREAMING_SNAKE_CASE ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset __snake_case = f_name.replace('''.py''' , '''''' ) __snake_case = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __snake_case = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE ) self._logger.info(F'''Adding directory {output_dir}''' ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(__SCREAMING_SNAKE_CASE ) if needs_manual_update: with_manual_update.append(__SCREAMING_SNAKE_CASE ) with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as f: f.writelines(__SCREAMING_SNAKE_CASE ) self._logger.info(F'''Converted in {output_file}''' ) for utils_file in utils_files: try: __snake_case = os.path.basename(__SCREAMING_SNAKE_CASE ) __snake_case = imports_to_builder_map[f_name.replace('''.py''' , '''''' )] self._logger.info(F'''Moving {dest_folder} to {utils_file}''' ) shutil.copy(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) except KeyError: self._logger.error(F'''Cannot find destination folder for {utils_file}. Please copy manually.''' ) if with_manual_update: for file_path in with_manual_update: self._logger.warning( F'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
24
"""simple docstring""" import re def lowerCamelCase_( _lowerCamelCase ) -> str: '''simple docstring''' if len(re.findall("[ATCG]" , _lowerCamelCase ) ) != len(_lowerCamelCase ): raise ValueError("Invalid Strand" ) return dna.translate(dna.maketrans("ATCG" , "TAGC" ) ) if __name__ == "__main__": import doctest doctest.testmod()
46
0
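The complement function above relies on `str.translate` with a one-to-one base mapping; as a standalone illustration:

table = str.maketrans("ATCG", "TAGC")
print("ATGC".translate(table))  # TACG — each base swapped for its complement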
import copy
import unittest

from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        LayoutLMv3ForQuestionAnswering,
        LayoutLMv3ForSequenceClassification,
        LayoutLMv3ForTokenClassification,
        LayoutLMv3Model,
    )
    from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal (x1 <= x2 and y1 <= y2)
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3Model(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class LayoutLMv3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMv3Model,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3ForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMv3ForQuestionAnswering, "feature-extraction": LayoutLMv3Model}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # all pipeline tests are skipped for this model in this test suite
        return True

    def setUp(self):
        self.model_tester = LayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
class LayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
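# A quick sanity check on the sequence-length bookkeeping behind the (1, 199, 768) expectation
# above (assuming the 224x224 input resolution and 16x16 patches of `microsoft/layoutlmv3-base`):
# 2 text tokens plus (224 // 16) ** 2 = 196 patch tokens plus 1 CLS token gives 199 positions.
#
#   text_len = 2                        # input_ids = [[1, 2]]
#   image_len = (224 // 16) ** 2 + 1    # 196 patches + 1 CLS token = 197
#   assert text_len + image_len == 199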
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
    "tokenization_tapas": ["TapasTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tapas"] = [
        "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TapasForMaskedLM",
        "TapasForQuestionAnswering",
        "TapasForSequenceClassification",
        "TapasModel",
        "TapasPreTrainedModel",
        "load_tf_weights_in_tapas",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_tapas"] = [
        "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFTapasForMaskedLM",
        "TFTapasForQuestionAnswering",
        "TFTapasForSequenceClassification",
        "TFTapasModel",
        "TFTapasPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
    from .tokenization_tapas import TapasTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tapas import (
            TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TapasForMaskedLM,
            TapasForQuestionAnswering,
            TapasForSequenceClassification,
            TapasModel,
            TapasPreTrainedModel,
            load_tf_weights_in_tapas,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_tapas import (
            TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFTapasForMaskedLM,
            TFTapasForQuestionAnswering,
            TFTapasForSequenceClassification,
            TFTapasModel,
            TFTapasPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import re
from pathlib import Path

import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor

from transformers import (
    EfficientFormerConfig,
    EfficientFormerForImageClassificationWithTeacher,
    EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling


def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)

            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name


def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val

    return checkpoint


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    return image


def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path",
        default=None,
        type=str,
        required=True,
        help="Path to EfficientFormer pytorch checkpoint.",
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for EfficientFormer model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )
    parser.set_defaults(push_to_hub=True)

    args = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
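# Example invocation (illustrative; the script name and file paths are placeholders to be
# replaced with real files, while the flags match the argparse definitions above):
#
#   python convert_efficientformer_original_pytorch_checkpoint_to_pytorch.py \
#       --pytorch_model_path efficientformer_l1_300d.pth \
#       --config_file efficientformer_l1_config.json \
#       --pytorch_dump_path converted/efficientformer-l1 \
#       --no-push_to_hub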
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import re

import torch
from CLAP import create_model

from transformers import AutoFeatureExtractor, ClapConfig, ClapModel


KEYS_TO_MODIFY_MAPPING = {
    "text_branch": "text_model",
    "audio_branch": "audio_model.audio_encoder",
    "attn": "attention.self",
    "self.proj": "output.dense",
    "attention.self_mask": "attn_mask",
    "mlp.fc1": "intermediate.dense",
    "mlp.fc2": "output.dense",
    "norm1": "layernorm_before",
    "norm2": "layernorm_after",
    "bn0": "batch_norm",
}

processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")


def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg


def rename_state_dict(state_dict):
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)

            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))

            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2

            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        # only the audio branch stores fused qkv weights that need to be split
        if "audio" in key and "qkv" in key:
            # split qkv into query, key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict


def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()

    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
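# A minimal sketch of how a concrete command would plug into this ABC: `register_subcommand`
# wires up argparse and `run` does the work. The `EchoCommand` class and its arguments are
# hypothetical, purely for illustration; real commands follow the same two-method pattern.
class EchoCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        echo_parser = parser.add_parser("echo", help="Print a message (illustrative only).")
        echo_parser.add_argument("message", type=str, help="The message to print.")
        echo_parser.set_defaults(func=lambda args: EchoCommand(args.message))

    def __init__(self, message: str):
        self.message = message

    def run(self):
        print(self.message)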
import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
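# A small sanity sketch (illustrative, run separately from the conversion): `make_linear_from_emb`
# reuses the embedding tensor as the LM head weight, so after conversion the two should hold
# identical values. The "model.pt" path below is a placeholder.
#
#   model = convert_fairseq_xglm_checkpoint_from_disk("model.pt")
#   assert torch.equal(model.lm_head.weight, model.model.embed_tokens.weight)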
import argparse

import torch

from transformers import BertForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    # The student-side key names below follow DistilBERT's module naming.
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
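# A small sanity check one could run after extraction (illustrative, not part of the script):
# DistilBERT keeps 6 of BERT-base's 12 layers, so the compressed state dict should contain
# exactly the student layers 0 through 5.
#
#   sd = torch.load(args.dump_checkpoint)
#   layers = {k.split(".")[3] for k in sd if k.startswith("distilbert.transformer.layer.")}
#   assert layers == {"0", "1", "2", "3", "4", "5"}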
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __lowerCAmelCase = { '''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = ['''MobileViTFeatureExtractor'''] __lowerCAmelCase = ['''MobileViTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = [ '''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MobileViTForImageClassification''', '''MobileViTForSemanticSegmentation''', '''MobileViTModel''', '''MobileViTPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = [ '''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFMobileViTForImageClassification''', '''TFMobileViTForSemanticSegmentation''', '''TFMobileViTModel''', '''TFMobileViTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilevit import MobileViTFeatureExtractor from .image_processing_mobilevit import MobileViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilevit import ( MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel, MobileViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilevit import ( TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel, TFMobileViTPreTrainedModel, ) else: import sys __lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __lowerCAmelCase = { '''configuration_groupvit''': [ '''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GroupViTConfig''', '''GroupViTOnnxConfig''', '''GroupViTTextConfig''', '''GroupViTVisionConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = [ '''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GroupViTModel''', '''GroupViTPreTrainedModel''', '''GroupViTTextModel''', '''GroupViTVisionModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = [ '''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFGroupViTModel''', '''TFGroupViTPreTrainedModel''', '''TFGroupViTTextModel''', '''TFGroupViTVisionModel''', ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys __lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import collections
import os
from typing import List, Optional, Tuple

from transformers.utils import is_jieba_available, requires_backends


if is_jieba_available():
    import jieba

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}


def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


class WordpieceTokenizer(object):
    """Runs greedy longest-match-first WordPiece tokenization over a single token."""

    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens


class CpmAntTokenizer(PreTrainedTokenizer):
    """Construct a CPM-Ant tokenizer based on jieba pre-tokenization and a WordPiece vocabulary."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string: jieba pre-tokenization followed by WordPiece."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Decode ids into a string, dropping padding and special boundary tokens."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
import numpy as np
import qiskit


def bb84(key_len: int = 8, seed: int | None = None) -> str:
    """Simulates the BB84 quantum key distribution protocol and returns the generated key."""
    # Set up the random number generator.
    rng = np.random.default_rng(seed=seed)

    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key


if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")

    from doctest import testmod

    testmod()
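# Why 6x oversampling is comfortable (a back-of-envelope check, not part of the protocol):
# Alice's and Bob's bases agree independently with probability 1/2, so the sifted key has an
# expected length of 6 * key_len / 2 = 3 * key_len, well above the key_len bits actually kept.
#
#   num_qubits = 6 * 8
#   expected_sifted_bits = num_qubits * 0.5  # = 24 bits for an 8-bit key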
import os
import unittest

from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
    VOCAB_FILES_NAMES,
    BasicTokenizer,
    WordpieceTokenizer,
    _is_control,
    _is_punctuation,
    _is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english


@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed",
            "wa", "un", "runn", "##ing", ",", "low", "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
def solution(n: int = 2000000) -> int:
    """Project Euler problem 10: returns the sum of all primes below n via a Sieve of Eratosthenes."""
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    print(f"{solution() = }")
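# Sanity check under a small bound (illustrative): the primes below 10 are 2, 3, 5 and 7,
# whose sum is 17.
#
#   assert solution(10) == 17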
import unittest

from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax

    from transformers.models.big_bird.modeling_flax_big_bird import (
        FlaxBigBirdForCausalLM,
        FlaxBigBirdForMaskedLM,
        FlaxBigBirdForMultipleChoice,
        FlaxBigBirdForPreTraining,
        FlaxBigBirdForQuestionAnswering,
        FlaxBigBirdForSequenceClassification,
        FlaxBigBirdForTokenClassification,
        FlaxBigBirdModel,
    )


class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
133
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _lowerCAmelCase = { """configuration_maskformer""": ["""MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MaskFormerConfig"""], """configuration_maskformer_swin""": ["""MaskFormerSwinConfig"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase = ["""MaskFormerFeatureExtractor"""] _lowerCAmelCase = ["""MaskFormerImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase = [ """MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """MaskFormerForInstanceSegmentation""", """MaskFormerModel""", """MaskFormerPreTrainedModel""", ] _lowerCAmelCase = [ """MaskFormerSwinBackbone""", """MaskFormerSwinModel""", """MaskFormerSwinPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig from .configuration_maskformer_swin import MaskFormerSwinConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_maskformer import MaskFormerFeatureExtractor from .image_processing_maskformer import MaskFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskformer import ( MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskFormerForInstanceSegmentation, MaskFormerModel, MaskFormerPreTrainedModel, ) from .modeling_maskformer_swin import ( MaskFormerSwinBackbone, MaskFormerSwinModel, MaskFormerSwinPreTrainedModel, ) else: import sys _lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
16
"""simple docstring""" from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __UpperCamelCase : def __init__( self ,_A ,_A=3 ,_A=32 ,_A=3 ,_A=10 ,_A=[10, 20, 30, 40] ,_A=[1, 1, 2, 1] ,_A=True ,_A=True ,_A="relu" ,_A=3 ,_A=None ,): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = parent _lowerCAmelCase : int = batch_size _lowerCAmelCase : int = image_size _lowerCAmelCase : List[str] = num_channels _lowerCAmelCase : Optional[int] = embeddings_size _lowerCAmelCase : Optional[int] = hidden_sizes _lowerCAmelCase : str = depths _lowerCAmelCase : str = is_training _lowerCAmelCase : int = use_labels _lowerCAmelCase : Optional[int] = hidden_act _lowerCAmelCase : Optional[int] = num_labels _lowerCAmelCase : Dict = scope _lowerCAmelCase : Union[str, Any] = len(_A ) def __lowerCamelCase ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCAmelCase : Optional[Any] = None if self.use_labels: _lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] ,self.num_labels ) _lowerCAmelCase : Union[str, Any] = self.get_config() return config, pixel_values, labels def __lowerCamelCase ( self ): '''simple docstring''' return ResNetConfig( num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,) def __lowerCamelCase ( self ,_A ,_A ,_A ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = TFResNetModel(config=_A ) _lowerCAmelCase : List[str] = model(_A ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,) def __lowerCamelCase ( self ,_A ,_A ,_A ): '''simple docstring''' _lowerCAmelCase : Any = self.num_labels _lowerCAmelCase : Dict = TFResNetForImageClassification(_A ) _lowerCAmelCase : int = model(_A ,labels=_A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def __lowerCamelCase ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs() _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = config_and_inputs _lowerCAmelCase : Any = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class __UpperCamelCase ( a__ , a__ , unittest.TestCase ): _UpperCAmelCase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () _UpperCAmelCase = ( {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification} if is_tf_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False 
_UpperCAmelCase = False def __lowerCamelCase ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = TFResNetModelTester(self ) _lowerCAmelCase : List[str] = ConfigTester(self ,config_class=_A ,has_text_modality=_A ) def __lowerCamelCase ( self ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __lowerCamelCase ( self ): '''simple docstring''' return @unittest.skip(reason='ResNet does not use inputs_embeds' ) def __lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skip(reason='ResNet does not support input and output embeddings' ) def __lowerCamelCase ( self ): '''simple docstring''' pass def __lowerCamelCase ( self ): '''simple docstring''' _lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase : int = model_class(_A ) _lowerCAmelCase : Union[str, Any] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCAmelCase : Any = [*signature.parameters.keys()] _lowerCAmelCase : str = ['pixel_values'] self.assertListEqual(arg_names[:1] ,_A ) def __lowerCamelCase ( self ): '''simple docstring''' _lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def __lowerCamelCase ( self ): '''simple docstring''' def check_hidden_states_output(_A ,_A ,_A ): _lowerCAmelCase : int = model_class(_A ) _lowerCAmelCase : int = model(**self._prepare_for_class(_A ,_A ) ) _lowerCAmelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _lowerCAmelCase : int = self.model_tester.num_stages self.assertEqual(len(_A ) ,expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,) _lowerCAmelCase, _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase : Any = ['basic', 'bottleneck'] for model_class in self.all_model_classes: for layer_type in layers_type: _lowerCAmelCase : Optional[int] = layer_type _lowerCAmelCase : Tuple = True check_hidden_states_output(_A ,_A ,_A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCAmelCase : Any = True check_hidden_states_output(_A ,_A ,_A ) def __lowerCamelCase ( self ): '''simple docstring''' _lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_A ) @slow def __lowerCamelCase ( self ): '''simple docstring''' for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase : Optional[Any] = TFResNetModel.from_pretrained(_A ) self.assertIsNotNone(_A ) def lowerCamelCase__ ( ): '''simple docstring''' _lowerCAmelCase : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class __UpperCamelCase ( unittest.TestCase ): @cached_property def __lowerCamelCase ( self ): '''simple docstring''' 
return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def __lowerCamelCase ( self ): '''simple docstring''' _lowerCAmelCase : str = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) _lowerCAmelCase : Tuple = self.default_image_processor _lowerCAmelCase : Optional[Any] = prepare_img() _lowerCAmelCase : int = image_processor(images=_A ,return_tensors='tf' ) # forward pass _lowerCAmelCase : int = model(**_A ) # verify the logits _lowerCAmelCase : Optional[Any] = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape ,_A ) _lowerCAmelCase : Any = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,_A ,atol=1E-4 ) )
16
1
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller

min_primitive_root = 3  # smallest candidate tested when searching for a primitive root


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
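A usage sketch for the generator above. It depends on the package-local rabin_miller and cryptomath modules, so it is illustrative rather than standalone; the invariant checked follows directly from how e_2 is constructed:

# Illustrative only: generate_key needs the sibling modules imported above.
public_key, private_key = generate_key(1024)  # smaller key size for a faster demo
key_size, e_1, e_2, p = public_key
# e_2 is the modular inverse of e_1**d mod p, so their product must be 1 mod p.
assert (pow(e_1, private_key[1], p) * e_2) % p == 1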
191
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
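A hedged usage sketch for the tokenizer exposed above; it assumes sentencepiece is installed and that "studio-ousia/mluke-base" (the reference mLUKE checkpoint on the Hub) is reachable:

from transformers import MLukeTokenizer

# Checkpoint name is an assumption; substitute any mLUKE checkpoint you have locally.
tokenizer = MLukeTokenizer.from_pretrained("studio-ousia/mluke-base")
print(tokenizer.tokenize("Hello world"))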
191
1
"""simple docstring""" import os from datetime import datetime as dt from github import Github __a : str = [ 'good first issue', 'good second issue', 'good difficult issue', 'enhancement', 'new pipeline/model', 'new scheduler', 'wip', ] def SCREAMING_SNAKE_CASE ( ): a__ = Github(os.environ['''GITHUB_TOKEN''']) a__ = g.get_repo('''huggingface/diffusers''') a__ = repo.get_issues(state='''open''') for issue in open_issues: a__ = sorted(issue.get_comments() , key=lambda lowerCamelCase_: i.created_at , reverse=lowerCamelCase_) a__ = comments[0] if len(lowerCamelCase_) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels()) ): # Closes the issue after 7 days of inactivity since the Stalebot notification. issue.edit(state='''closed''') elif ( "stale" in issue.get_labels() and last_comment is not None and last_comment.user.login != "github-actions[bot]" ): # Opens the issue if someone other than Stalebot commented. issue.edit(state='''open''') issue.remove_from_labels('''stale''') elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels()) ): # Post a Stalebot notification after 23 days of inactivity. issue.create_comment( '''This issue has been automatically marked as stale because it has not had ''' '''recent activity. If you think this still needs to be addressed ''' '''please comment on this thread.\n\nPlease note that issues that do not follow the ''' '''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) ''' '''are likely to be ignored.''') issue.add_to_labels('''stale''') if __name__ == "__main__": main()
200
"""simple docstring""" def SCREAMING_SNAKE_CASE ( lowerCamelCase_): assert column_title.isupper() a__ = 0 a__ = len(lowerCamelCase_) - 1 a__ = 0 while index >= 0: a__ = (ord(column_title[index]) - 64) * pow(26 , lowerCamelCase_) answer += value power += 1 index -= 1 return answer if __name__ == "__main__": from doctest import testmod testmod()
200
1
'''simple docstring''' import argparse import os import re import tensorflow as tf import torch from transformers import BertConfig, BertModel from transformers.utils import logging logging.set_verbosity_info() _lowerCAmelCase = logging.get_logger(__name__) def _lowerCAmelCase ( lowercase : Any , lowercase : Any , lowercase : Optional[Any] ) ->List[Any]: """simple docstring""" lowercase__ = os.path.abspath(lowercase ) logger.info(F'''Converting TensorFlow checkpoint from {tf_path}''' ) # Load weights from TF model lowercase__ = tf.train.list_variables(lowercase ) lowercase__ = [] lowercase__ = [] lowercase__ = [] for full_name, shape in init_vars: # logger.info(f"Loading TF weight {name} with shape {shape}") lowercase__ = full_name.split('''/''' ) if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]: logger.info(F'''Skipping non-model layer {full_name}''' ) continue if "optimizer" in full_name: logger.info(F'''Skipping optimization layer {full_name}''' ) continue if name[0] == "model": # ignore initial 'model' lowercase__ = name[1:] # figure out how many levels deep the name is lowercase__ = 0 for _name in name: if _name.startswith('''layer_with_weights''' ): depth += 1 else: break layer_depth.append(lowercase ) # read data lowercase__ = tf.train.load_variable(lowercase , lowercase ) names.append('''/'''.join(lowercase ) ) arrays.append(lowercase ) logger.info(F'''Read a total of {len(lowercase ):,} layers''' ) # Sanity check if len(set(lowercase ) ) != 1: raise ValueError(F'''Found layer names with different depths (layer depth {list(set(lowercase ) )})''' ) lowercase__ = list(set(lowercase ) )[0] if layer_depth != 1: raise ValueError( '''The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP''' ''' heads.''' ) # convert layers logger.info('''Converting weights...''' ) for full_name, array in zip(lowercase , lowercase ): lowercase__ = full_name.split('''/''' ) lowercase__ = model lowercase__ = [] for i, m_name in enumerate(lowercase ): if m_name == ".ATTRIBUTES": # variable names end with .ATTRIBUTES/VARIABLE_VALUE break if m_name.startswith('''layer_with_weights''' ): lowercase__ = int(m_name.split('''-''' )[-1] ) if layer_num <= 2: # embedding layers # layer_num 0: word_embeddings # layer_num 1: position_embeddings # layer_num 2: token_type_embeddings continue elif layer_num == 3: # embedding LayerNorm trace.extend(['''embeddings''', '''LayerNorm'''] ) lowercase__ = getattr(lowercase , '''embeddings''' ) lowercase__ = getattr(lowercase , '''LayerNorm''' ) elif layer_num > 3 and layer_num < config.num_hidden_layers + 4: # encoder layers trace.extend(['''encoder''', '''layer''', str(layer_num - 4 )] ) lowercase__ = getattr(lowercase , '''encoder''' ) lowercase__ = getattr(lowercase , '''layer''' ) lowercase__ = pointer[layer_num - 4] elif layer_num == config.num_hidden_layers + 4: # pooler layer trace.extend(['''pooler''', '''dense'''] ) lowercase__ = getattr(lowercase , '''pooler''' ) lowercase__ = getattr(lowercase , '''dense''' ) elif m_name == "embeddings": trace.append('''embeddings''' ) lowercase__ = getattr(lowercase , '''embeddings''' ) if layer_num == 0: trace.append('''word_embeddings''' ) lowercase__ = getattr(lowercase , '''word_embeddings''' ) elif layer_num == 1: trace.append('''position_embeddings''' ) lowercase__ = getattr(lowercase , '''position_embeddings''' ) elif layer_num == 2: trace.append('''token_type_embeddings''' ) lowercase__ = getattr(lowercase , '''token_type_embeddings''' ) 
else: raise ValueError(F'''Unknown embedding layer with name {full_name}''' ) trace.append('''weight''' ) lowercase__ = getattr(lowercase , '''weight''' ) elif m_name == "_attention_layer": # self-attention layer trace.extend(['''attention''', '''self'''] ) lowercase__ = getattr(lowercase , '''attention''' ) lowercase__ = getattr(lowercase , '''self''' ) elif m_name == "_attention_layer_norm": # output attention norm trace.extend(['''attention''', '''output''', '''LayerNorm'''] ) lowercase__ = getattr(lowercase , '''attention''' ) lowercase__ = getattr(lowercase , '''output''' ) lowercase__ = getattr(lowercase , '''LayerNorm''' ) elif m_name == "_attention_output_dense": # output attention dense trace.extend(['''attention''', '''output''', '''dense'''] ) lowercase__ = getattr(lowercase , '''attention''' ) lowercase__ = getattr(lowercase , '''output''' ) lowercase__ = getattr(lowercase , '''dense''' ) elif m_name == "_output_dense": # output dense trace.extend(['''output''', '''dense'''] ) lowercase__ = getattr(lowercase , '''output''' ) lowercase__ = getattr(lowercase , '''dense''' ) elif m_name == "_output_layer_norm": # output dense trace.extend(['''output''', '''LayerNorm'''] ) lowercase__ = getattr(lowercase , '''output''' ) lowercase__ = getattr(lowercase , '''LayerNorm''' ) elif m_name == "_key_dense": # attention key trace.append('''key''' ) lowercase__ = getattr(lowercase , '''key''' ) elif m_name == "_query_dense": # attention query trace.append('''query''' ) lowercase__ = getattr(lowercase , '''query''' ) elif m_name == "_value_dense": # attention value trace.append('''value''' ) lowercase__ = getattr(lowercase , '''value''' ) elif m_name == "_intermediate_dense": # attention intermediate dense trace.extend(['''intermediate''', '''dense'''] ) lowercase__ = getattr(lowercase , '''intermediate''' ) lowercase__ = getattr(lowercase , '''dense''' ) elif m_name == "_output_layer_norm": # output layer norm trace.append('''output''' ) lowercase__ = getattr(lowercase , '''output''' ) # weights & biases elif m_name in ["bias", "beta"]: trace.append('''bias''' ) lowercase__ = getattr(lowercase , '''bias''' ) elif m_name in ["kernel", "gamma"]: trace.append('''weight''' ) lowercase__ = getattr(lowercase , '''weight''' ) else: logger.warning(F'''Ignored {m_name}''' ) # for certain layers reshape is necessary lowercase__ = '''.'''.join(lowercase ) if re.match(R'''(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)''' , lowercase ) or re.match( R'''(\S+)\.attention\.output\.dense\.weight''' , lowercase ): lowercase__ = array.reshape(pointer.data.shape ) if "kernel" in full_name: lowercase__ = array.transpose() if pointer.shape == array.shape: lowercase__ = torch.from_numpy(lowercase ) else: raise ValueError( F'''Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:''' F''' {array.shape}''' ) logger.info(F'''Successfully set variable {full_name} to PyTorch layer {trace}''' ) return model def _lowerCAmelCase ( lowercase : Union[str, Any] , lowercase : int , lowercase : Any ) ->Union[str, Any]: """simple docstring""" logger.info(F'''Loading model based on config from {config_path}...''' ) lowercase__ = BertConfig.from_json_file(lowercase ) lowercase__ = BertModel(lowercase ) # Load weights from checkpoint logger.info(F'''Loading weights from checkpoint {tf_checkpoint_path}...''' ) load_tfa_weights_in_bert(lowercase , lowercase , lowercase ) # Save pytorch-model logger.info(F'''Saving PyTorch model to {pytorch_dump_path}...''' ) 
torch.save(model.state_dict() , lowercase ) if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path." ) parser.add_argument( "--bert_config_file", type=str, required=True, help="The config json file corresponding to the BERT model. This specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", type=str, required=True, help="Path to the output PyTorch model (must include filename).", ) _lowerCAmelCase = parser.parse_args() convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
161
"""Processor class for BridgeTower: wraps a BridgeTower image processor and a Roberta tokenizer."""
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
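A hedged usage sketch for the processor above; the checkpoint name and the sample image URL are assumptions (the standard COCO cats image used throughout the Hugging Face docs), and PIL plus requests must be installed:

import requests
from PIL import Image
from transformers import BridgeTowerProcessor

# "BridgeTower/bridgetower-base" is assumed to be the reference checkpoint on the Hub.
processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, text="two cats sleeping on a couch", return_tensors="pt")
print(sorted(inputs.keys()))  # expect input_ids, attention_mask, pixel_values, pixel_mask, ...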
161
1
# Logistic Regression from scratch

# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        # predicting the value of probability from the logistic regression algorithm
        return sigmoid_function(np.dot(x, theta))

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
199
import argparse import os import re import torch from flax.traverse_util import flatten_dict from tax import checkpoints from transformers import ( AutoTokenizer, PixaStructConfig, PixaStructForConditionalGeneration, PixaStructImageProcessor, PixaStructProcessor, PixaStructTextConfig, PixaStructVisionConfig, ) def _SCREAMING_SNAKE_CASE ( __lowercase : int ) -> Union[str, Any]: """simple docstring""" __A = checkpoints.load_tax_checkpoint(__lowercase ) __A = flatten_dict(__lowercase ) return flax_params def _SCREAMING_SNAKE_CASE ( __lowercase : Optional[int] ) -> List[str]: """simple docstring""" __A = {} __A = { """token_embedder""": """embeddings""", """encoder_norm""": """layernorm""", """kernel""": """weight""", """.out""": """.output""", """scale""": """weight""", """embedders_0.pos_embedding""": """row_embedder.weight""", """embedders_1.pos_embedding""": """column_embedder.weight""", } __A = { """query""": """attention.query""", """key""": """attention.key""", """value""": """attention.value""", """output.dense""": """output""", """encoder_decoder_attention.o""": """encoder_decoder_attention.attention.o""", """pre_self_attention_layer_norm""": """self_attention.layer_norm""", """pre_cross_attention_layer_norm""": """encoder_decoder_attention.layer_norm""", """mlp.""": """mlp.DenseReluDense.""", """pre_mlp_layer_norm""": """mlp.layer_norm""", """self_attention.o""": """self_attention.attention.o""", """decoder.embeddings.embedding""": """decoder.embed_tokens.weight""", """decoder.relpos_bias.rel_embedding""": """decoder.layer.0.self_attention.attention.relative_attention_bias.weight""", """decoder.decoder_norm.weight""": """decoder.final_layer_norm.weight""", """decoder.logits_dense.weight""": """decoder.lm_head.weight""", } for key in flax_dict.keys(): if "target" in key: # remove the first prefix from the key __A = """.""".join(key[1:] ) # rename the key for old, new in CONVERSION_MAPPING.items(): __A = new_key.replace(__lowercase , __lowercase ) if "decoder" in new_key: for old, new in DECODER_CONVERSION_MAPPING.items(): __A = new_key.replace(__lowercase , __lowercase ) if "layers" in new_key and "decoder" not in new_key: # use regex to replace the layer number __A = re.sub(R"""layers_(\d+)""" , R"""layer.\1""" , __lowercase ) __A = new_key.replace("""encoder""" , """encoder.encoder""" ) elif "layers" in new_key and "decoder" in new_key: # use regex to replace the layer number __A = re.sub(R"""layers_(\d+)""" , R"""layer.\1""" , __lowercase ) __A = flax_dict[key] __A = {} # convert converted_dict into torch format for key in converted_dict.keys(): if ("embed_tokens" not in key) and ("embedder" not in key): __A = torch.from_numpy(converted_dict[key].T ) else: __A = torch.from_numpy(converted_dict[key] ) return converted_torch_dict def _SCREAMING_SNAKE_CASE ( __lowercase : str , __lowercase : List[Any] , __lowercase : Dict=False , __lowercase : List[str]=False ) -> Any: """simple docstring""" __A = get_flax_param(__lowercase ) if not use_large: __A = PixaStructVisionConfig() __A = PixaStructTextConfig() else: __A = PixaStructVisionConfig( hidden_size=1_5_3_6 , d_ff=3_9_6_8 , num_attention_heads=2_4 , num_hidden_layers=1_8 ) __A = PixaStructTextConfig(hidden_size=1_5_3_6 , d_ff=3_9_6_8 , num_heads=2_4 , num_layers=1_8 ) __A = PixaStructConfig( vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=__lowercase ) __A = PixaStructForConditionalGeneration(__lowercase ) __A = rename_and_convert_flax_params(__lowercase ) model.load_state_dict(__lowercase ) __A 
= AutoTokenizer.from_pretrained("""ybelkada/test-pix2struct-tokenizer""" ) __A = PixaStructImageProcessor() __A = PixaStructProcessor(image_processor=__lowercase , tokenizer=__lowercase ) if use_large: __A = 4_0_9_6 __A = True # mkdir if needed os.makedirs(__lowercase , exist_ok=__lowercase ) model.save_pretrained(__lowercase ) processor.save_pretrained(__lowercase ) print("""Model saved in {}""".format(__lowercase ) ) if __name__ == "__main__": __a : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.") parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--use_large", action="store_true", help="Use large model.") parser.add_argument("--is_vqa", action="store_true", help="Use large model.") __a : Dict = parser.parse_args() convert_pixastruct_original_pytorch_checkpoint_to_hf( args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large )
199
1
"""simple docstring""" import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class UpperCAmelCase : def __init__( self : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : str=None , __lowerCamelCase : str=None , __lowerCamelCase : Union[str, Any]="resnet50" , __lowerCamelCase : Any=3 , __lowerCamelCase : List[str]=3_2 , __lowerCamelCase : str=3 , __lowerCamelCase : Dict=True , __lowerCamelCase : int=True , ): """simple docstring""" _snake_case = parent _snake_case = out_indices if out_indices is not None else [4] _snake_case = stage_names _snake_case = out_features _snake_case = backbone _snake_case = batch_size _snake_case = image_size _snake_case = num_channels _snake_case = use_pretrained_backbone _snake_case = is_training def __UpperCAmelCase ( self : Any ): """simple docstring""" _snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case = self.get_config() return config, pixel_values def __UpperCAmelCase ( self : List[str] ): """simple docstring""" return TimmBackboneConfig( image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , ) def __UpperCAmelCase ( self : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any ): """simple docstring""" _snake_case = TimmBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): _snake_case = model(__lowerCamelCase ) self.parent.assertEqual( result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 1_4, 1_4) , ) def __UpperCAmelCase ( self : int ): """simple docstring""" _snake_case = self.prepare_config_and_inputs() _snake_case , _snake_case = config_and_inputs _snake_case = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch @require_timm class UpperCAmelCase ( __SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,unittest.TestCase ): A__ : Optional[int] = (TimmBackbone,) if is_torch_available() else () A__ : Dict = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {} A__ : Optional[Any] = False A__ : List[str] = False A__ : Any = False A__ : Optional[Any] = False def __UpperCAmelCase ( self : Dict ): """simple docstring""" _snake_case = TimmBackboneModelTester(self ) _snake_case = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase ) def __UpperCAmelCase ( self : Optional[Any] ): """simple docstring""" self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def 
__UpperCAmelCase ( self : Optional[Any] ): """simple docstring""" _snake_case = '''resnet18''' _snake_case = '''microsoft/resnet-18''' _snake_case = AutoBackbone.from_pretrained(__lowerCamelCase , use_timm_backbone=__lowerCamelCase ) _snake_case = AutoBackbone.from_pretrained(__lowerCamelCase ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices , (-1,) ) self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] ) _snake_case = AutoBackbone.from_pretrained(__lowerCamelCase , use_timm_backbone=__lowerCamelCase , out_indices=[1, 2, 3] ) _snake_case = AutoBackbone.from_pretrained(__lowerCamelCase , out_indices=[1, 2, 3] ) self.assertEqual(timm_model.out_indices , transformers_model.out_indices ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) @unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' ) def __UpperCAmelCase ( self : Any ): """simple docstring""" pass @unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' ) def __UpperCAmelCase ( self : Tuple ): """simple docstring""" pass @unittest.skip('''TimmBackbone initialization is managed on the timm side''' ) def __UpperCAmelCase ( self : Union[str, Any] ): """simple docstring""" pass @unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' ) def __UpperCAmelCase ( self : Tuple ): """simple docstring""" pass @unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' ) def __UpperCAmelCase ( self : List[str] ): """simple docstring""" pass @unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' ) def __UpperCAmelCase ( self : Dict ): """simple docstring""" pass @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' ) def __UpperCAmelCase ( self : str ): """simple docstring""" pass @unittest.skip('''model weights aren\'t tied in TimmBackbone.''' ) def __UpperCAmelCase ( self : Optional[Any] ): """simple docstring""" pass @unittest.skip('''model weights aren\'t tied in TimmBackbone.''' ) def __UpperCAmelCase ( self : Tuple ): """simple docstring""" pass @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' ) def __UpperCAmelCase ( self : Optional[Any] ): """simple docstring""" pass @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' ) def __UpperCAmelCase ( self : int ): """simple docstring""" pass @unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' ) def __UpperCAmelCase ( self : str ): """simple docstring""" pass @unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' ) def __UpperCAmelCase ( self : str ): """simple docstring""" pass @unittest.skip('''Safetensors is not supported by timm.''' ) def __UpperCAmelCase ( self : List[str] ): """simple docstring""" pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __UpperCAmelCase ( self : List[Any] ): """simple 
docstring""" pass def __UpperCAmelCase ( self : int ): """simple docstring""" _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(__lowerCamelCase ) _snake_case = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case = [*signature.parameters.keys()] _snake_case = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def __UpperCAmelCase ( self : Dict ): """simple docstring""" _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() _snake_case = True _snake_case = self.has_attentions # no need to test all models as different heads yield the same functionality _snake_case = self.all_model_classes[0] _snake_case = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) _snake_case = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) _snake_case = model(**__lowerCamelCase ) _snake_case = outputs[0][-1] # Encoder-/Decoder-only models _snake_case = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: _snake_case = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=__lowerCamelCase ) self.assertIsNotNone(hidden_states.grad ) if self.has_attentions: self.assertIsNotNone(attentions.grad ) def __UpperCAmelCase ( self : Union[str, Any] ): """simple docstring""" _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() _snake_case = model(**__lowerCamelCase ) self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) ) self.assertEqual(len(model.channels ) , len(config.out_indices ) ) # Check output of last stage is taken if out_features=None, out_indices=None _snake_case = copy.deepcopy(__lowerCamelCase ) _snake_case = None _snake_case = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() _snake_case = model(**__lowerCamelCase ) self.assertEqual(len(result.feature_maps ) , 1 ) self.assertEqual(len(model.channels ) , 1 ) # Check backbone can be initialized with fresh weights _snake_case = copy.deepcopy(__lowerCamelCase ) _snake_case = False _snake_case = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() _snake_case = model(**__lowerCamelCase )
103
"""simple docstring""" from typing import Any class a__ : def __init__( self : List[str] , UpperCamelCase_ : Any): """simple docstring""" __UpperCAmelCase : str = data __UpperCAmelCase : Optional[Any] = None class a__ : def __init__( self : Any): """simple docstring""" __UpperCAmelCase : Optional[int] = None def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.head while temp is not None: print(temp.data , end=" ") __UpperCAmelCase : Tuple = temp.next print() def a_ ( self : int , UpperCamelCase_ : Any): """simple docstring""" __UpperCAmelCase : List[str] = Node(UpperCamelCase_) __UpperCAmelCase : str = self.head __UpperCAmelCase : Optional[int] = new_node def a_ ( self : str , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str): """simple docstring""" if node_data_a == node_data_a: return else: __UpperCAmelCase : int = self.head while node_a is not None and node_a.data != node_data_a: __UpperCAmelCase : Tuple = node_a.next __UpperCAmelCase : List[Any] = self.head while node_a is not None and node_a.data != node_data_a: __UpperCAmelCase : Optional[Any] = node_a.next if node_a is None or node_a is None: return __UpperCAmelCase , __UpperCAmelCase : Any = node_a.data, node_a.data if __name__ == "__main__": A = LinkedList() for i in range(5, 0, -1): ll.push(i) ll.print_list() ll.swap_nodes(1, 4) print("""After swapping""") ll.print_list()
77
0
from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str):
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self):
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
708
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig


ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}


class AlbertConfig(PretrainedConfig):
    """Configuration class for ALBERT models."""

    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
221
0
'''simple docstring''' import argparse import os import re import packaging.version lowercase : List[Any] = "examples/" lowercase : Dict = { "examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"), "init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"), "setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","), "doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"), } lowercase : str = { "init": "src/transformers/__init__.py", "setup": "setup.py", } lowercase : Union[str, Any] = "README.md" def SCREAMING_SNAKE_CASE__ ( __A , __A , __A ) -> Tuple: with open(__A , 'r' , encoding='utf-8' , newline='\n' ) as f: _snake_case = f.read() _snake_case , _snake_case = REPLACE_PATTERNS[pattern] _snake_case = replace.replace('VERSION' , __A ) _snake_case = re_pattern.sub(__A , __A ) with open(__A , 'w' , encoding='utf-8' , newline='\n' ) as f: f.write(__A ) def SCREAMING_SNAKE_CASE__ ( __A ) -> Union[str, Any]: for folder, directories, fnames in os.walk(__A ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('research_projects' ) if "legacy" in directories: directories.remove('legacy' ) for fname in fnames: if fname.endswith('.py' ): update_version_in_file(os.path.join(__A , __A ) , __A , pattern='examples' ) def SCREAMING_SNAKE_CASE__ ( __A , __A=False ) -> Tuple: for pattern, fname in REPLACE_FILES.items(): update_version_in_file(__A , __A , __A ) if not patch: update_version_in_examples(__A ) def SCREAMING_SNAKE_CASE__ ( ) -> List[str]: _snake_case = '🤗 Transformers currently provides the following architectures' _snake_case = '1. Want to contribute a new model?' with open(__A , 'r' , encoding='utf-8' , newline='\n' ) as f: _snake_case = f.readlines() # Find the start of the list. _snake_case = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 _snake_case = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith('1.' ): _snake_case = lines[index].replace( 'https://huggingface.co/docs/transformers/main/model_doc' , 'https://huggingface.co/docs/transformers/model_doc' , ) index += 1 with open(__A , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(__A ) def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]: with open(REPLACE_FILES['init'] , 'r' ) as f: _snake_case = f.read() _snake_case = REPLACE_PATTERNS['init'][0].search(__A ).groups()[0] return packaging.version.parse(__A ) def SCREAMING_SNAKE_CASE__ ( __A=False ) -> Any: _snake_case = get_version() if patch and default_version.is_devrelease: raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' ) if default_version.is_devrelease: _snake_case = default_version.base_version elif patch: _snake_case = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}' else: _snake_case = F'{default_version.major}.{default_version.minor + 1}.0' # Now let's ask nicely if that's the right one. _snake_case = input(F'Which version are you releasing? [{default_version}]' ) if len(__A ) == 0: _snake_case = default_version print(F'Updating version to {version}.' ) global_version_update(__A , patch=__A ) if not patch: print('Cleaning main README, don\'t forget to run `make fix-copies`.' 
) clean_main_ref_in_model_list() def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]: _snake_case = get_version() _snake_case = F'{current_version.major}.{current_version.minor + 1}.0.dev0' _snake_case = current_version.base_version # Check with the user we got that right. _snake_case = input(F'Which version are we developing now? [{dev_version}]' ) if len(__A ) == 0: _snake_case = dev_version print(F'Updating version to {version}.' ) global_version_update(__A ) print('Cleaning main README, don\'t forget to run `make fix-copies`.' ) clean_main_ref_in_model_list() if __name__ == "__main__": lowercase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.") parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.") lowercase : int = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("Nothing to do after a patch :-)") else: post_release_work()
495
"""Lazy import structure for the Ernie model family."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available


_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ernie"] = [
        "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ErnieForCausalLM",
        "ErnieForMaskedLM",
        "ErnieForMultipleChoice",
        "ErnieForNextSentencePrediction",
        "ErnieForPreTraining",
        "ErnieForQuestionAnswering",
        "ErnieForSequenceClassification",
        "ErnieForTokenClassification",
        "ErnieModel",
        "ErniePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# style_context_codestyle: 495
# label: 1
"""simple docstring""" import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch lowercase__ :int = logging.get_logger(__name__) class snake_case : '''simple docstring''' def __init__( self : str , __lowercase : str = None , __lowercase : uuid.UUID = None , __lowercase : Any=None , __lowercase : List[Any]=None ): '''simple docstring''' if not conversation_id: __UpperCAmelCase : Dict = uuid.uuida() if past_user_inputs is None: __UpperCAmelCase : Dict = [] if generated_responses is None: __UpperCAmelCase : str = [] __UpperCAmelCase : uuid.UUID = conversation_id __UpperCAmelCase : List[str] = past_user_inputs __UpperCAmelCase : List[str] = generated_responses __UpperCAmelCase : Optional[str] = text def __eq__( self : Union[str, Any] , __lowercase : Union[str, Any] ): '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def A_ ( self : List[Any] , __lowercase : str , __lowercase : bool = False ): '''simple docstring''' if self.new_user_input: if overwrite: logger.warning( f'''User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten ''' f'''with: \"{text}\".''' ) __UpperCAmelCase : Optional[int] = text else: logger.warning( f'''User input added while unprocessed input was existing: \"{self.new_user_input}\" new input ''' f'''ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input''' ) else: __UpperCAmelCase : Union[str, Any] = text def A_ ( self : Tuple ): '''simple docstring''' if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) __UpperCAmelCase : Optional[int] = None def A_ ( self : str , __lowercase : str ): '''simple docstring''' self.generated_responses.append(_UpperCamelCase ) def A_ ( self : str ): '''simple docstring''' for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = f'''Conversation id: {self.uuid} \n''' for is_user, text in self.iter_texts(): __UpperCAmelCase : str = """user""" if is_user else """bot""" output += f'''{name} >> {text} \n''' return output @add_end_docstrings( __lowerCAmelCase , R'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ' , ) class snake_case ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : Tuple , *__lowercase : str , **__lowercase : Dict ): '''simple docstring''' super().__init__(*_UpperCamelCase , **_UpperCamelCase ) if self.tokenizer.pad_token_id is None: __UpperCAmelCase : Any = self.tokenizer.eos_token def A_ ( self : int , __lowercase : Union[str, Any]=None , __lowercase : Optional[Any]=None , __lowercase : Union[str, Any]=None , **__lowercase : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = {} __UpperCAmelCase : Union[str, Any] = 
{} __UpperCAmelCase : List[Any] = {} if min_length_for_response is not None: __UpperCAmelCase : Dict = min_length_for_response if minimum_tokens is not None: __UpperCAmelCase : Dict = minimum_tokens if "max_length" in generate_kwargs: __UpperCAmelCase : Any = generate_kwargs["""max_length"""] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: __UpperCAmelCase : int = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(_UpperCamelCase ) return preprocess_params, forward_params, postprocess_params def __call__( self : Any , __lowercase : Union[Conversation, List[Conversation]] , __lowercase : Optional[Any]=0 , **__lowercase : List[Any] ): '''simple docstring''' __UpperCAmelCase : str = super().__call__(_UpperCamelCase , num_workers=_UpperCamelCase , **_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) == 1: return outputs[0] return outputs def A_ ( self : Tuple , __lowercase : Conversation , __lowercase : Tuple=32 ): '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ): raise ValueError('''ConversationalPipeline, expects Conversation as inputs''' ) if conversation.new_user_input is None: raise ValueError( f'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. ''' '''Add user inputs with the conversation\'s `add_user_input` method''' ) if hasattr(self.tokenizer , '''_build_conversation_input_ids''' ): __UpperCAmelCase : List[str] = self.tokenizer._build_conversation_input_ids(_UpperCamelCase ) else: # If the tokenizer cannot handle conversations, we default to only the old version __UpperCAmelCase : Optional[int] = self._legacy_parse_and_tokenize(_UpperCamelCase ) if self.framework == "pt": __UpperCAmelCase : Tuple = torch.LongTensor([input_ids] ) elif self.framework == "tf": __UpperCAmelCase : Optional[Any] = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def A_ ( self : Dict , __lowercase : Union[str, Any] , __lowercase : Tuple=10 , **__lowercase : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Dict = generate_kwargs.get('''max_length''' , self.model.config.max_length ) __UpperCAmelCase : Union[str, Any] = model_inputs["""input_ids"""].shape[1] if max_length - minimum_tokens < n: logger.warning(f'''Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})''' ) __UpperCAmelCase : Optional[Any] = max_length - minimum_tokens __UpperCAmelCase : Tuple = model_inputs["""input_ids"""][:, -trim:] if "attention_mask" in model_inputs: __UpperCAmelCase : List[str] = model_inputs["""attention_mask"""][:, -trim:] __UpperCAmelCase : List[Any] = model_inputs.pop('''conversation''' ) __UpperCAmelCase : Any = max_length __UpperCAmelCase : str = self.model.generate(**_UpperCamelCase , **_UpperCamelCase ) if self.model.config.is_encoder_decoder: __UpperCAmelCase : Optional[Any] = 1 else: __UpperCAmelCase : List[Any] = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def A_ ( self : Union[str, Any] , __lowercase : List[str] , __lowercase : str=True ): '''simple docstring''' __UpperCAmelCase : List[str] = model_outputs["""output_ids"""] __UpperCAmelCase : Dict = self.tokenizer.decode( output_ids[0] , skip_special_tokens=_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase , ) __UpperCAmelCase : Tuple = model_outputs["""conversation"""] conversation.mark_processed() 
conversation.append_response(_UpperCamelCase ) return conversation def A_ ( self : Optional[int] , __lowercase : Conversation ): '''simple docstring''' __UpperCAmelCase : str = self.tokenizer.eos_token_id __UpperCAmelCase : int = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase ) ) if len(_UpperCamelCase ) > self.tokenizer.model_max_length: __UpperCAmelCase : List[str] = input_ids[-self.tokenizer.model_max_length :] return input_ids
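# Usage sketch for the pipeline above; "microsoft/DialoGPT-small" is an
# illustrative conversational checkpoint, and the method names follow the
# public transformers API rather than the obfuscated identifiers above.
from transformers import Conversation, pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
conversation = Conversation("Going to the movies tonight - any suggestions?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])

# A follow-up turn reuses the same Conversation, keeping past turns in context:
conversation.add_user_input("Is it an action movie?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])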
# code_codestyle: 715
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowercase__ :Tuple = { 'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'], 'tokenization_xlm': ['XLMTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ :Optional[Any] = [ 'XLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'XLMForMultipleChoice', 'XLMForQuestionAnswering', 'XLMForQuestionAnsweringSimple', 'XLMForSequenceClassification', 'XLMForTokenClassification', 'XLMModel', 'XLMPreTrainedModel', 'XLMWithLMHeadModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ :List[Any] = [ 'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFXLMForMultipleChoice', 'TFXLMForQuestionAnsweringSimple', 'TFXLMForSequenceClassification', 'TFXLMForTokenClassification', 'TFXLMMainLayer', 'TFXLMModel', 'TFXLMPreTrainedModel', 'TFXLMWithLMHeadModel', ] if TYPE_CHECKING: from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig from .tokenization_xlm import XLMTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm import ( XLM_PRETRAINED_MODEL_ARCHIVE_LIST, XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMPreTrainedModel, XLMWithLMHeadModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm import ( TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, TFXLMForTokenClassification, TFXLMMainLayer, TFXLMModel, TFXLMPreTrainedModel, TFXLMWithLMHeadModel, ) else: import sys lowercase__ :List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
# style_context_codestyle: 374
# label: 0
'''simple docstring''' import unittest from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available from transformers.pipelines import pipeline from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_detectrona, require_pytesseract, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image from transformers.image_utils import load_image else: class __SCREAMING_SNAKE_CASE : @staticmethod def lowerCamelCase_ ( *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Optional[int] ): '''simple docstring''' pass def _lowerCAmelCase ( __magic_name__ : str ) -> List[str]: return None # This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace, # so we can expect it to be available. UpperCamelCase_ = ( """https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png""" ) @is_pipeline_test @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): lowerCamelCase_ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING @require_pytesseract @require_vision def lowerCamelCase_ ( self : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str ): '''simple docstring''' lowercase : List[Any] =pipeline( '''document-question-answering''' , model=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ ) lowercase : str =INVOICE_URL lowercase : Dict =list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ) , UpperCAmelCase__ , '''''' ) ) ) lowercase : Optional[int] ='''What is the placebo?''' lowercase : Optional[int] =[ { '''image''': load_image(UpperCAmelCase__ ), '''question''': question, }, { '''image''': image, '''question''': question, }, { '''image''': image, '''question''': question, '''word_boxes''': word_boxes, }, ] return dqa_pipeline, examples def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] ): '''simple docstring''' lowercase : Dict =dqa_pipeline(UpperCAmelCase__ , top_k=2 ) self.assertEqual( UpperCAmelCase__ , [ [ {'''score''': ANY(UpperCAmelCase__ ), '''answer''': ANY(UpperCAmelCase__ ), '''start''': ANY(UpperCAmelCase__ ), '''end''': ANY(UpperCAmelCase__ )}, {'''score''': ANY(UpperCAmelCase__ ), '''answer''': ANY(UpperCAmelCase__ ), '''start''': ANY(UpperCAmelCase__ ), '''end''': ANY(UpperCAmelCase__ )}, ] ] * 3 , ) @require_torch @require_detectrona @require_pytesseract def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : str =pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' ) lowercase : List[str] =INVOICE_URL lowercase : Tuple ='''How many cats are there?''' lowercase : str =[ {'''score''': 0.00_01, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39}, {'''score''': 0.00_01, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40}, ] lowercase : Union[str, Any] =dqa_pipeline(image=UpperCAmelCase__ , question=UpperCAmelCase__ , top_k=2 ) self.assertEqual(nested_simplify(UpperCAmelCase__ , decimals=4 ) , UpperCAmelCase__ ) lowercase : Union[str, Any] =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 ) self.assertEqual(nested_simplify(UpperCAmelCase__ , decimals=4 ) , UpperCAmelCase__ ) # This image does not detect 
ANY text in it, meaning layoutlmv2 should fail. # Empty answer probably lowercase : Optional[Any] ='''./tests/fixtures/tests_samples/COCO/000000039769.png''' lowercase : Any =dqa_pipeline(image=UpperCAmelCase__ , question=UpperCAmelCase__ , top_k=2 ) self.assertEqual(UpperCAmelCase__ , [] ) # We can optionnally pass directly the words and bounding boxes lowercase : Optional[Any] ='''./tests/fixtures/tests_samples/COCO/000000039769.png''' lowercase : Dict =[] lowercase : List[Any] =[] lowercase : Dict =dqa_pipeline(image=UpperCAmelCase__ , question=UpperCAmelCase__ , words=UpperCAmelCase__ , boxes=UpperCAmelCase__ , top_k=2 ) self.assertEqual(UpperCAmelCase__ , [] ) @slow @require_torch @require_detectrona @require_pytesseract def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : List[Any] =pipeline( '''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , ) lowercase : Union[str, Any] =INVOICE_URL lowercase : Any ='''What is the invoice number?''' lowercase : Dict =dqa_pipeline(image=UpperCAmelCase__ , question=UpperCAmelCase__ , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ {'''score''': 0.99_44, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.00_09, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] , ) lowercase : Any =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ {'''score''': 0.99_44, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.00_09, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] , ) lowercase : Any =dqa_pipeline( [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ [ {'''score''': 0.99_44, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.00_09, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ], ] * 2 , ) @slow @require_torch @require_detectrona @require_pytesseract def lowerCamelCase_ ( self : str ): '''simple docstring''' lowercase : Any =pipeline( '''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , ) lowercase : Optional[int] =INVOICE_URL lowercase : Dict ='''What is the invoice number?''' lowercase : Optional[Any] =dqa_pipeline(image=UpperCAmelCase__ , question=UpperCAmelCase__ , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ {'''score''': 0.99_74, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23}, {'''score''': 0.99_48, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] , ) lowercase : Optional[Any] =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ {'''score''': 0.99_74, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23}, {'''score''': 0.99_48, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] , ) lowercase : Optional[Any] =dqa_pipeline( [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ [ {'''score''': 0.99_74, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23}, {'''score''': 0.99_48, '''answer''': '''us-001''', 
'''start''': 16, '''end''': 16}, ] ] * 2 , ) @slow @require_torch @require_pytesseract @require_vision def lowerCamelCase_ ( self : str ): '''simple docstring''' lowercase : Optional[int] =AutoTokenizer.from_pretrained( '''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=UpperCAmelCase__ ) lowercase : Optional[Any] =pipeline( '''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=UpperCAmelCase__ , revision='''3dc6de3''' , ) lowercase : Dict =INVOICE_URL lowercase : Union[str, Any] ='''What is the invoice number?''' lowercase : Union[str, Any] =dqa_pipeline(image=UpperCAmelCase__ , question=UpperCAmelCase__ , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ {'''score''': 0.42_51, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.08_19, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23}, ] , ) lowercase : List[Any] =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ {'''score''': 0.42_51, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.08_19, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23}, ] , ) lowercase : Dict =dqa_pipeline( [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ [ {'''score''': 0.42_51, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.08_19, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23}, ] ] * 2 , ) lowercase : Dict =list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ) , UpperCAmelCase__ , '''''' ) ) ) # This model should also work if `image` is set to None lowercase : int =dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ {'''score''': 0.42_51, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.08_19, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23}, ] , ) @slow @require_torch @require_pytesseract @require_vision def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : List[str] =AutoTokenizer.from_pretrained( '''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=UpperCAmelCase__ ) lowercase : Any =pipeline( '''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=UpperCAmelCase__ , revision='''3dc6de3''' , max_seq_len=50 , ) lowercase : Union[str, Any] =INVOICE_URL lowercase : List[str] ='''What is the invoice number?''' lowercase : Dict =dqa_pipeline(image=UpperCAmelCase__ , question=UpperCAmelCase__ , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ {'''score''': 0.99_99, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.99_98, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] , ) lowercase : Tuple =dqa_pipeline( [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ [ {'''score''': 0.99_99, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.99_98, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] ] * 2 , ) lowercase : List[Any] 
=list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ) , UpperCAmelCase__ , '''''' ) ) ) # This model should also work if `image` is set to None lowercase : str =dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ {'''score''': 0.99_99, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.99_98, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] , ) @slow @require_torch def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : Tuple =pipeline( '''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , ) lowercase : Tuple =INVOICE_URL lowercase : str ='''What is the invoice number?''' lowercase : str =dqa_pipeline(image=UpperCAmelCase__ , question=UpperCAmelCase__ , top_k=2 ) self.assertEqual(nested_simplify(UpperCAmelCase__ , decimals=4 ) , [{'''answer''': '''us-001'''}] ) @require_tf @unittest.skip('''Document question answering not implemented in TF''' ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' pass
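# Usage sketch matching the slow tests above; the checkpoint, revision, and
# invoice image come straight from the tests (pytesseract must be installed so
# the pipeline can OCR the word boxes itself):
from transformers import pipeline

dqa = pipeline(
    "document-question-answering",
    model="impira/layoutlm-document-qa",
    revision="3dc6de3",
)
invoice_url = (
    "https://huggingface.co/spaces/impira/docquery/resolve/"
    "2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
preds = dqa(image=invoice_url, question="What is the invoice number?", top_k=2)
# e.g. [{"score": 0.9999, "answer": "us-001", "start": 16, "end": 16}, ...]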
# code_codestyle: 92
'''simple docstring''' import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class _snake_case: def __init__(self : List[Any] , a : str , a : Any=13 , a : Optional[Any]=30 , a : Union[str, Any]=2 , a : List[str]=3 , a : List[str]=True , a : List[Any]=True , a : Tuple=32 , a : Optional[Any]=5 , a : str=4 , a : List[str]=37 , a : List[str]="gelu" , a : int=0.1 , a : int=0.1 , a : str=10 , a : Tuple=0.02 , a : Union[str, Any]=3 , a : List[str]=None , a : Any=2 , ) -> Optional[int]: """simple docstring""" A__ = parent A__ = batch_size A__ = image_size A__ = patch_size A__ = num_channels A__ = is_training A__ = use_labels A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = type_sequence_label_size A__ = initializer_range A__ = scope A__ = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) A__ = (image_size // patch_size) ** 2 A__ = num_patches + 2 def _UpperCamelCase (self : Optional[Any] ) -> List[str]: """simple docstring""" A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A__ = self.get_config() return config, pixel_values, labels def _UpperCamelCase (self : Union[str, Any] ) -> int: """simple docstring""" return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def _UpperCamelCase (self : Optional[int] , a : Tuple , a : Dict , a : Optional[Any] ) -> Optional[Any]: """simple docstring""" A__ = DeiTModel(config=a ) model.to(a ) model.eval() A__ = model(a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase (self : Optional[int] , a : Any , a : Optional[Any] , a : Optional[int] ) -> Dict: """simple docstring""" A__ = DeiTForMaskedImageModeling(config=a ) model.to(a ) model.eval() A__ = model(a ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, 
self.image_size, self.image_size) ) # test greyscale images A__ = 1 A__ = DeiTForMaskedImageModeling(a ) model.to(a ) model.eval() A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A__ = model(a ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def _UpperCamelCase (self : Optional[Any] , a : Optional[Any] , a : Union[str, Any] , a : Union[str, Any] ) -> str: """simple docstring""" A__ = self.type_sequence_label_size A__ = DeiTForImageClassification(a ) model.to(a ) model.eval() A__ = model(a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A__ = 1 A__ = DeiTForImageClassification(a ) model.to(a ) model.eval() A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A__ = model(a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _UpperCamelCase (self : List[Any] ) -> Optional[int]: """simple docstring""" A__ = self.prepare_config_and_inputs() ( ( A__ ) , ( A__ ) , ( A__ ) , ) = config_and_inputs A__ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class _snake_case( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): __snake_case: Optional[Any] = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) __snake_case: int = ( { '''feature-extraction''': DeiTModel, '''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) __snake_case: Any = False __snake_case: Any = False __snake_case: Any = False def _UpperCamelCase (self : Any ) -> int: """simple docstring""" A__ = DeiTModelTester(self ) A__ = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=37 ) def _UpperCamelCase (self : Dict ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='DeiT does not use inputs_embeds' ) def _UpperCamelCase (self : List[str] ) -> List[str]: """simple docstring""" pass def _UpperCamelCase (self : Optional[Any] ) -> Tuple: """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a , nn.Linear ) ) def _UpperCamelCase (self : Union[str, Any] ) -> str: """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(a ) A__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ['pixel_values'] self.assertListEqual(arg_names[:1] , a ) def _UpperCamelCase (self : Union[str, Any] ) -> Dict: """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) def _UpperCamelCase (self : Dict ) -> Any: """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*a ) def _UpperCamelCase (self : List[Any] ) -> Optional[Any]: """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a ) 
def _UpperCamelCase (self : Optional[int] , a : int , a : Union[str, Any] , a : List[Any]=False ) -> Optional[int]: """simple docstring""" A__ = super()._prepare_for_class(a , a , return_labels=a ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def _UpperCamelCase (self : Any ) -> Tuple: """simple docstring""" if not self.model_tester.is_training: return A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(a ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue A__ = model_class(a ) model.to(a ) model.train() A__ = self._prepare_for_class(a , a , return_labels=a ) A__ = model(**a ).loss loss.backward() def _UpperCamelCase (self : Optional[Any] ) -> int: """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return A__ = False A__ = True for model_class in self.all_model_classes: if model_class in get_values(a ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue A__ = model_class(a ) model.gradient_checkpointing_enable() model.to(a ) model.train() A__ = self._prepare_for_class(a , a , return_labels=a ) A__ = model(**a ).loss loss.backward() def _UpperCamelCase (self : Optional[Any] ) -> List[str]: """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = [ {'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float}, {'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long}, {'title': 'regression', 'num_labels': 1, 'dtype': torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(a ), *get_values(a ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f"""Testing {model_class} with {problem_type["title"]}""" ): A__ = problem_type['title'] A__ = problem_type['num_labels'] A__ = model_class(a ) model.to(a ) model.train() A__ = self._prepare_for_class(a , a , return_labels=a ) if problem_type["num_labels"] > 1: A__ = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] ) A__ = inputs['labels'].to(problem_type['dtype'] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. 
# See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=a ) as warning_list: A__ = model(**a ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( f"""Something is going wrong in the regression problem: intercepted {w.message}""" ) loss.backward() @slow def _UpperCamelCase (self : Union[str, Any] ) -> Tuple: """simple docstring""" for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = DeiTModel.from_pretrained(a ) self.assertIsNotNone(a ) def _A ( ): '''simple docstring''' A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class _snake_case( unittest.TestCase ): @cached_property def _UpperCamelCase (self : Tuple ) -> Union[str, Any]: """simple docstring""" return ( DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' ) if is_vision_available() else None ) @slow def _UpperCamelCase (self : List[str] ) -> Optional[Any]: """simple docstring""" A__ = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ).to( a ) A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=a , return_tensors='pt' ).to(a ) # forward pass with torch.no_grad(): A__ = model(**a ) # verify the logits A__ = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , a ) A__ = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def _UpperCamelCase (self : Tuple ) -> str: """simple docstring""" A__ = DeiTModel.from_pretrained( 'facebook/deit-base-distilled-patch16-224' , torch_dtype=torch.floataa , device_map='auto' ) A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=a , return_tensors='pt' ) A__ = inputs.pixel_values.to(a ) # forward pass to make sure inference works in fp16 with torch.no_grad(): A__ = model(a )
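# Inference sketch mirroring the slow integration test above. The checkpoint
# and image fixture come from the test; the `id2label` lookup at the end is an
# assumption about the checkpoint's config rather than something the test does.
import torch
from PIL import Image
from transformers import DeiTForImageClassificationWithTeacher, DeiTImageProcessor

processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
model = DeiTForImageClassificationWithTeacher.from_pretrained(
    "facebook/deit-base-distilled-patch16-224"
)

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000), as the test asserts
print(model.config.id2label[logits.argmax(-1).item()])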
# style_context_codestyle: 531
# label: 0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCAmelCase : List[str] = { "configuration_jukebox": [ "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "JukeboxConfig", "JukeboxPriorConfig", "JukeboxVQVAEConfig", ], "tokenization_jukebox": ["JukeboxTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase : Tuple = [ "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST", "JukeboxModel", "JukeboxPreTrainedModel", "JukeboxVQVAE", "JukeboxPrior", ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys __UpperCAmelCase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# code_codestyle: 711
import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class _snake_case ( unittest.TestCase ): def lowerCAmelCase_ ( self ) -> List[Any]: # A mock response for an HTTP head request to emulate server down snake_case__ :Tuple = mock.Mock() snake_case__ :List[str] = 500 snake_case__ :Any = {} snake_case__ :Union[str, Any] = HTTPError snake_case__ :Tuple = {} # Download this model to make sure it's in the cache. snake_case__ :Any = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("requests.Session.request" ,return_value=UpperCamelCase ) as mock_head: snake_case__ :Dict = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" ) # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def lowerCAmelCase_ ( self ) -> Dict: # A mock response for an HTTP head request to emulate server down snake_case__ :Union[str, Any] = mock.Mock() snake_case__ :int = 500 snake_case__ :Any = {} snake_case__ :Dict = HTTPError snake_case__ :List[Any] = {} # Download this model to make sure it's in the cache. snake_case__ :Optional[int] = GPTaTokenizerFast.from_pretrained("gpt2" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("requests.Session.request" ,return_value=UpperCamelCase ) as mock_head: snake_case__ :Any = GPTaTokenizerFast.from_pretrained("gpt2" ) # This check we did call the fake head request mock_head.assert_called() def lowerCAmelCase_ ( self ) -> int: # This test is for deprecated behavior and can be removed in v5 try: snake_case__ :Union[str, Any] = tempfile.mktemp() with open(UpperCamelCase ,"wb" ) as f: http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" ,UpperCamelCase ) snake_case__ :Tuple = AlbertTokenizer.from_pretrained(UpperCamelCase ) finally: os.remove(UpperCamelCase ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile("tokenizer.json" ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. return try: with open("tokenizer.json" ,"wb" ) as f: http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" ,UpperCamelCase ) snake_case__ :Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size ,1_000 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. 
finally: os.remove("tokenizer.json" ) def lowerCAmelCase_ ( self ) -> Union[str, Any]: # This test is for deprecated behavior and can be removed in v5 snake_case__ :Union[str, Any] = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" ) @is_staging_test class _snake_case ( unittest.TestCase ): _A = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou'] @classmethod def lowerCAmelCase_ ( cls ) -> Optional[int]: snake_case__ :List[str] = TOKEN HfFolder.save_token(UpperCamelCase ) @classmethod def lowerCAmelCase_ ( cls ) -> Union[str, Any]: try: delete_repo(token=cls._token ,repo_id="test-tokenizer" ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id="valid_org/test-tokenizer-org" ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id="test-dynamic-tokenizer" ) except HTTPError: pass def lowerCAmelCase_ ( self ) -> Optional[Any]: with tempfile.TemporaryDirectory() as tmp_dir: snake_case__ :List[str] = os.path.join(UpperCamelCase ,"vocab.txt" ) with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) snake_case__ :str = BertTokenizer(UpperCamelCase ) tokenizer.push_to_hub("test-tokenizer" ,use_auth_token=self._token ) snake_case__ :Dict = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) # Reset repo delete_repo(token=self._token ,repo_id="test-tokenizer" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase ,repo_id="test-tokenizer" ,push_to_hub=UpperCamelCase ,use_auth_token=self._token ) snake_case__ :List[str] = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) def lowerCAmelCase_ ( self ) -> Optional[int]: with tempfile.TemporaryDirectory() as tmp_dir: snake_case__ :List[Any] = os.path.join(UpperCamelCase ,"vocab.txt" ) with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) snake_case__ :Any = BertTokenizer(UpperCamelCase ) tokenizer.push_to_hub("valid_org/test-tokenizer-org" ,use_auth_token=self._token ) snake_case__ :Any = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) # Reset repo delete_repo(token=self._token ,repo_id="valid_org/test-tokenizer-org" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( UpperCamelCase ,repo_id="valid_org/test-tokenizer-org" ,push_to_hub=UpperCamelCase ,use_auth_token=self._token ) snake_case__ :Union[str, Any] = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) @require_tokenizers def lowerCAmelCase_ ( self ) -> Any: CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: snake_case__ :str = os.path.join(UpperCamelCase ,"vocab.txt" ) with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) snake_case__ :Optional[int] = CustomTokenizer(UpperCamelCase ) # No fast custom tokenizer tokenizer.push_to_hub("test-dynamic-tokenizer" ,use_auth_token=self._token ) snake_case__ :Union[str, Any] = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=UpperCamelCase ) # Can't make an 
isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizer" ) # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: snake_case__ :int = os.path.join(UpperCamelCase ,"vocab.txt" ) with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) snake_case__ :Tuple = BertTokenizerFast.from_pretrained(UpperCamelCase ) bert_tokenizer.save_pretrained(UpperCamelCase ) snake_case__ :List[Any] = CustomTokenizerFast.from_pretrained(UpperCamelCase ) tokenizer.push_to_hub("test-dynamic-tokenizer" ,use_auth_token=self._token ) snake_case__ :List[Any] = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=UpperCamelCase ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizerFast" ) snake_case__ :List[str] = AutoTokenizer.from_pretrained( f'{USER}/test-dynamic-tokenizer' ,use_fast=UpperCamelCase ,trust_remote_code=UpperCamelCase ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizer" ) class _snake_case ( unittest.TestCase ): def lowerCAmelCase_ ( self ) -> List[Any]: snake_case__ :int = Trie() trie.add("Hello 友達" ) self.assertEqual(trie.data ,{"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} ) trie.add("Hello" ) trie.data self.assertEqual(trie.data ,{"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} ) def lowerCAmelCase_ ( self ) -> int: snake_case__ :List[str] = Trie() self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) ,["[CLS] This is a extra_id_100"] ) trie.add("[CLS]" ) trie.add("extra_id_1" ) trie.add("extra_id_100" ) self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) ,["[CLS]", " This is a ", "extra_id_100"] ) def lowerCAmelCase_ ( self ) -> str: snake_case__ :Optional[Any] = Trie() trie.add("A" ) self.assertEqual(trie.split("ABC" ) ,["A", "BC"] ) self.assertEqual(trie.split("BCA" ) ,["BC", "A"] ) def lowerCAmelCase_ ( self ) -> Dict: snake_case__ :Any = Trie() trie.add("TOKEN]" ) trie.add("[SPECIAL_TOKEN]" ) self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) ,["This is something ", "[SPECIAL_TOKEN]"] ) def lowerCAmelCase_ ( self ) -> Tuple: snake_case__ :List[Any] = Trie() trie.add("A" ) trie.add("P" ) trie.add("[SPECIAL_TOKEN]" ) self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) ,["This is something ", "[SPECIAL_TOKEN]"] ) def lowerCAmelCase_ ( self ) -> Tuple: snake_case__ :str = Trie() trie.add("AB" ) trie.add("B" ) trie.add("C" ) self.assertEqual(trie.split("ABC" ) ,["AB", "C"] ) def lowerCAmelCase_ ( self ) -> Union[str, Any]: snake_case__ :Dict = Trie() trie.add("ABC" ) trie.add("B" ) trie.add("CD" ) self.assertEqual(trie.split("ABCD" ) ,["ABC", "D"] ) def lowerCAmelCase_ ( self ) -> int: # Even if the offsets are wrong, we necessarily output correct string # parts. snake_case__ :Optional[int] = Trie() snake_case__ :Union[str, Any] = trie.cut_text("ABC" ,[0, 0, 2, 1, 2, 3] ) self.assertEqual(UpperCamelCase ,["AB", "C"] )
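# The Trie semantics the tests above pin down, in one runnable sketch: added
# tokens are cut out greedily (longest match wins) and the untouched stretches
# of text are kept intact.
from transformers.tokenization_utils import Trie

trie = Trie()
trie.add("[CLS]")
trie.add("extra_id_1")
trie.add("extra_id_100")
print(trie.split("[CLS] This is a extra_id_100"))
# -> ['[CLS]', ' This is a ', 'extra_id_100']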
# style_context_codestyle: 57
# label: 0
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]


def next_number(number: int) -> int:
    """
    Return the next number in the chain: the sum of the squares of the digits.

    >>> next_number(44)
    32
    >>> next_number(10)
    1
    """
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000

    return sum_of_digits_squared


# There are 2 chains:
# one ends with 89, and seeding chain member 58 first gives the fewest
# iterations when all the members are checked;
# the other ends with 1 and has only the single element 1.
# So 58 and 1 are declared at the start.
# Changed dictionary to an array to quicken the solution.
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[0] = True  # the chain of 1 ends with 1
CHAINS[57] = False  # the chain of 58 ends with 89


def chain(number: int) -> bool:
    """
    Return True if ``number``'s chain ends with 1 and False if it ends with
    89, memoizing every chain member seen along the way.

    >>> chain(10)
    True
    >>> chain(58)
    False
    """
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10000000) -> int:
    """
    Count the starting numbers below ``number`` whose chain arrives at 89.

    >>> solution(10)
    7
    """
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
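# Illustrative check of the chains from the Project Euler 92 statement, using
# next_number as defined above (appended here purely for exposition):
# 44 -> 32 -> 13 -> 10 -> 1, while 85 -> 89 and then loops at 89 forever.
_walk = [44]
while _walk[-1] not in (1, 89):
    _walk.append(next_number(_walk[-1]))
assert _walk == [44, 32, 13, 10, 1]
assert next_number(85) == 89  # 8**2 + 5**2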
# code_codestyle: 360
from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax SCREAMING_SNAKE_CASE: Optional[int] = logging.get_logger(__name__) @add_end_docstrings(SCREAMING_SNAKE_CASE__ ) class lowercase_ (SCREAMING_SNAKE_CASE__ ): def __init__( self : Any , **snake_case__ : str ): """simple docstring""" super().__init__(**snake_case__ ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self : str , snake_case__ : Union[str, List[str], "Image", List["Image"]] , **snake_case__ : Optional[Any] ): """simple docstring""" return super().__call__(snake_case__ , **snake_case__ ) def __a ( self : str , **snake_case__ : str ): """simple docstring""" SCREAMING_SNAKE_CASE_ = {} if "candidate_labels" in kwargs: SCREAMING_SNAKE_CASE_ = kwargs['candidate_labels'] if "hypothesis_template" in kwargs: SCREAMING_SNAKE_CASE_ = kwargs['hypothesis_template'] return preprocess_params, {}, {} def __a ( self : List[str] , snake_case__ : Optional[int] , snake_case__ : Optional[Any]=None , snake_case__ : List[str]="This is a photo of {}." ): """simple docstring""" SCREAMING_SNAKE_CASE_ = load_image(snake_case__ ) SCREAMING_SNAKE_CASE_ = self.image_processor(images=[image] , return_tensors=self.framework ) SCREAMING_SNAKE_CASE_ = candidate_labels SCREAMING_SNAKE_CASE_ = [hypothesis_template.format(snake_case__ ) for x in candidate_labels] SCREAMING_SNAKE_CASE_ = self.tokenizer(snake_case__ , return_tensors=self.framework , padding=snake_case__ ) SCREAMING_SNAKE_CASE_ = [text_inputs] return inputs def __a ( self : List[Any] , snake_case__ : Any ): """simple docstring""" SCREAMING_SNAKE_CASE_ = model_inputs.pop('candidate_labels' ) SCREAMING_SNAKE_CASE_ = model_inputs.pop('text_inputs' ) if isinstance(text_inputs[0] , snake_case__ ): SCREAMING_SNAKE_CASE_ = text_inputs[0] else: # Batching case. SCREAMING_SNAKE_CASE_ = text_inputs[0][0] SCREAMING_SNAKE_CASE_ = self.model(**snake_case__ , **snake_case__ ) SCREAMING_SNAKE_CASE_ = { 'candidate_labels': candidate_labels, 'logits': outputs.logits_per_image, } return model_outputs def __a ( self : List[Any] , snake_case__ : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = model_outputs.pop('candidate_labels' ) SCREAMING_SNAKE_CASE_ = model_outputs['logits'][0] if self.framework == "pt": SCREAMING_SNAKE_CASE_ = logits.softmax(dim=-1 ).squeeze(-1 ) SCREAMING_SNAKE_CASE_ = probs.tolist() if not isinstance(snake_case__ , snake_case__ ): SCREAMING_SNAKE_CASE_ = [scores] elif self.framework == "tf": SCREAMING_SNAKE_CASE_ = stable_softmax(snake_case__ , axis=-1 ) SCREAMING_SNAKE_CASE_ = probs.numpy().tolist() else: raise ValueError(f'''Unsupported framework: {self.framework}''' ) SCREAMING_SNAKE_CASE_ = [ {'score': score, 'label': candidate_label} for score, candidate_label in sorted(zip(snake_case__ , snake_case__ ) , key=lambda snake_case__ : -x[0] ) ] return result
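# Usage sketch for the pipeline above. The checkpoint is illustrative (any
# CLIP-style zero-shot checkpoint such as "openai/clip-vit-base-patch32"
# should work) and the scores shown are made up; the kwargs match the
# parameter hooks defined above.
from transformers import pipeline

classifier = pipeline(
    "zero-shot-image-classification", model="openai/clip-vit-base-patch32"
)
result = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["two cats", "a dog", "an airplane"],
    hypothesis_template="This is a photo of {}.",
)
# result is sorted by descending score, e.g.
# [{"score": 0.98, "label": "two cats"}, {"score": 0.01, "label": "a dog"}, ...]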
# style_context_codestyle: 360
# label: 1
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): lowerCAmelCase_ = StableDiffusionInstructPixaPixPipeline lowerCAmelCase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''} lowerCAmelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS lowerCAmelCase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS lowerCAmelCase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS def lowerCAmelCase ( self ) -> Any: torch.manual_seed(0 ) _snake_case = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) _snake_case = PNDMScheduler(skip_prk_steps=lowerCAmelCase_ ) torch.manual_seed(0 ) _snake_case = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) _snake_case = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) _snake_case = CLIPTextModel(lowerCAmelCase_ ) _snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) _snake_case = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=0 ) -> Tuple: _snake_case = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ ) _snake_case = image.cpu().permute(0 , 2 , 3 , 1 )[0] _snake_case = Image.fromarray(np.uinta(lowerCAmelCase_ ) ).convert('RGB' ) if str(lowerCAmelCase_ ).startswith('mps' ): _snake_case = torch.manual_seed(lowerCAmelCase_ ) else: _snake_case = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ ) _snake_case = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'image_guidance_scale': 1, 'output_type': 'numpy', } return inputs def lowerCAmelCase ( self ) -> List[Any]: _snake_case = 'cpu' # ensure determinism for the device-dependent torch.Generator _snake_case = self.get_dummy_components() _snake_case = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase_ ) _snake_case = sd_pipe.to(lowerCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) _snake_case 
= self.get_dummy_inputs(lowerCAmelCase_ ) _snake_case = sd_pipe(**lowerCAmelCase_ ).images _snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) _snake_case = np.array([0.75_26, 0.37_50, 0.45_47, 0.61_17, 0.58_66, 0.50_16, 0.43_27, 0.56_42, 0.48_15] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def lowerCAmelCase ( self ) -> Tuple: _snake_case = 'cpu' # ensure determinism for the device-dependent torch.Generator _snake_case = self.get_dummy_components() _snake_case = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase_ ) _snake_case = sd_pipe.to(lowerCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) _snake_case = self.get_dummy_inputs(lowerCAmelCase_ ) _snake_case = 'french fries' _snake_case = sd_pipe(**lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ ) _snake_case = output.images _snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) _snake_case = np.array([0.75_11, 0.36_42, 0.45_53, 0.62_36, 0.57_97, 0.50_13, 0.43_43, 0.56_11, 0.48_31] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def lowerCAmelCase ( self ) -> List[str]: _snake_case = 'cpu' # ensure determinism for the device-dependent torch.Generator _snake_case = self.get_dummy_components() _snake_case = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase_ ) _snake_case = sd_pipe.to(lowerCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) _snake_case = self.get_dummy_inputs(lowerCAmelCase_ ) _snake_case = [inputs['prompt']] * 2 _snake_case = np.array(inputs['image'] ).astype(np.floataa ) / 2_55.0 _snake_case = torch.from_numpy(lowerCAmelCase_ ).unsqueeze(0 ).to(lowerCAmelCase_ ) _snake_case = image / 2 + 0.5 _snake_case = image.permute(0 , 3 , 1 , 2 ) _snake_case = image.repeat(2 , 1 , 1 , 1 ) _snake_case = sd_pipe(**lowerCAmelCase_ ).images _snake_case = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) _snake_case = np.array([0.58_12, 0.57_48, 0.52_22, 0.59_08, 0.56_95, 0.71_74, 0.68_04, 0.55_23, 0.55_79] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def lowerCAmelCase ( self ) -> Any: _snake_case = 'cpu' # ensure determinism for the device-dependent torch.Generator _snake_case = self.get_dummy_components() _snake_case = EulerAncestralDiscreteScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' ) _snake_case = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase_ ) _snake_case = sd_pipe.to(lowerCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) _snake_case = self.get_dummy_inputs(lowerCAmelCase_ ) _snake_case = sd_pipe(**lowerCAmelCase_ ).images _snake_case = image[0, -3:, -3:, -1] _snake_case = [round(lowerCAmelCase_ , 4 ) for x in image_slice.flatten().tolist()] print(','.join([str(lowerCAmelCase_ ) for x in slice] ) ) assert image.shape == (1, 32, 32, 3) _snake_case = np.array([0.74_17, 0.38_42, 0.47_32, 0.57_76, 0.58_91, 0.51_39, 0.40_52, 0.56_73, 0.49_86] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def lowerCAmelCase ( self ) -> Any: super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def lowerCAmelCase ( self ) -> List[Any]: _snake_case = self.get_dummy_components() _snake_case = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase_ ) _snake_case = VaeImageProcessor(do_resize=lowerCAmelCase_ , do_normalize=lowerCAmelCase_ ) _snake_case = pipe.to(lowerCAmelCase_ ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) _snake_case = 
pipe(**self.get_dummy_inputs_by_type(lowerCAmelCase_ , input_image_type='pt' ) )[0] _snake_case = components['vae'] _snake_case = self.get_dummy_inputs_by_type(lowerCAmelCase_ , input_image_type='pt' ) for image_param in self.image_latents_params: if image_param in inputs.keys(): _snake_case = vae.encode(inputs[image_param] ).latent_dist.mode() _snake_case = pipe(**lowerCAmelCase_ )[0] _snake_case = np.abs(out - out_latents_inputs ).max() self.assertLess(lowerCAmelCase_ , 1E-4 , 'passing latents as image input generate different result from passing image' ) @slow @require_torch_gpu class UpperCamelCase_ ( unittest.TestCase ): def lowerCAmelCase ( self ) -> str: super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase ( self , lowerCAmelCase_=0 ) -> List[str]: _snake_case = torch.manual_seed(lowerCAmelCase_ ) _snake_case = load_image( 'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' ) _snake_case = { 'prompt': 'turn him into a cyborg', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'image_guidance_scale': 1.0, 'output_type': 'numpy', } return inputs def lowerCAmelCase ( self ) -> Dict: _snake_case = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=lowerCAmelCase_ ) pipe.to(lowerCAmelCase_ ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) pipe.enable_attention_slicing() _snake_case = self.get_inputs() _snake_case = pipe(**lowerCAmelCase_ ).images _snake_case = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) _snake_case = np.array([0.59_02, 0.60_15, 0.60_27, 0.59_83, 0.60_92, 0.60_61, 0.57_65, 0.57_85, 0.55_55] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def lowerCAmelCase ( self ) -> List[str]: _snake_case = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=lowerCAmelCase_ ) _snake_case = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(lowerCAmelCase_ ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) pipe.enable_attention_slicing() _snake_case = self.get_inputs() _snake_case = pipe(**lowerCAmelCase_ ).images _snake_case = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) _snake_case = np.array([0.65_78, 0.68_17, 0.69_72, 0.67_61, 0.68_56, 0.69_16, 0.64_28, 0.65_16, 0.63_01] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def lowerCAmelCase ( self ) -> Tuple: _snake_case = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=lowerCAmelCase_ ) _snake_case = DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(lowerCAmelCase_ ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) pipe.enable_attention_slicing() _snake_case = self.get_inputs() _snake_case = pipe(**lowerCAmelCase_ ).images _snake_case = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) _snake_case = np.array([0.38_28, 0.38_34, 0.38_18, 0.37_92, 0.38_65, 0.37_52, 0.37_92, 0.38_47, 0.37_53] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def lowerCAmelCase ( self ) -> Optional[Any]: _snake_case = 0 def callback_fn(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> None: _snake_case = True nonlocal number_of_steps number_of_steps += 1 if step == 1: _snake_case = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) _snake_case = latents[0, -3:, -3:, -1] _snake_case = np.array([-0.24_63, -0.46_44, 
-0.97_56, 1.51_76, 1.44_14, 0.78_66, 0.98_97, 0.85_21, 0.79_83] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 elif step == 2: _snake_case = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) _snake_case = latents[0, -3:, -3:, -1] _snake_case = np.array([-0.26_44, -0.46_26, -0.96_53, 1.51_76, 1.45_51, 0.76_86, 0.98_05, 0.84_52, 0.81_15] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 _snake_case = False _snake_case = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=lowerCAmelCase_ , torch_dtype=torch.floataa ) _snake_case = pipe.to(lowerCAmelCase_ ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) pipe.enable_attention_slicing() _snake_case = self.get_inputs() pipe(**lowerCAmelCase_ , callback=lowerCAmelCase_ , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def lowerCAmelCase ( self ) -> Union[str, Any]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _snake_case = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=lowerCAmelCase_ , torch_dtype=torch.floataa ) _snake_case = pipe.to(lowerCAmelCase_ ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() _snake_case = self.get_inputs() _snake_case = pipe(**lowerCAmelCase_ ) _snake_case = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def lowerCAmelCase ( self ) -> int: _snake_case = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 _snake_case = inputs['image'].resize((504, 504) ) _snake_case = 'timbrooks/instruct-pix2pix' _snake_case = StableDiffusionInstructPixaPixPipeline.from_pretrained( lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , ) pipe.to(lowerCAmelCase_ ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) pipe.enable_attention_slicing() _snake_case = pipe(**lowerCAmelCase_ ) _snake_case = output.images[0] _snake_case = image[255:258, 383:386, -1] assert image.shape == (504, 504, 3) _snake_case = np.array([0.27_26, 0.25_29, 0.26_64, 0.26_55, 0.26_41, 0.26_42, 0.25_91, 0.26_49, 0.25_90] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
541
import argparse from collections import defaultdict import yaml UpperCAmelCase_ = """docs/source/en/_toctree.yml""" def lowerCamelCase__ ( UpperCamelCase__ : Optional[Any] ) -> str: '''simple docstring''' _snake_case = defaultdict(UpperCamelCase__ ) _snake_case = [] _snake_case = [] for doc in doc_list: if "local" in doc: counts[doc["local"]] += 1 if doc["title"].lower() == "overview": overview_doc.append({'local': doc['local'], 'title': doc['title']} ) else: new_doc_list.append(UpperCamelCase__ ) _snake_case = new_doc_list _snake_case = [key for key, value in counts.items() if value > 1] _snake_case = [] for duplicate_key in duplicates: _snake_case = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} ) if len(UpperCamelCase__ ) > 1: raise ValueError( F'''{duplicate_key} is present several times in the documentation table of content at ''' '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the ' 'others.' ) # Only add this once new_doc.append({'local': duplicate_key, 'title': titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] ) _snake_case = sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : s["title"].lower() ) # "overview" gets special treatment and is always first if len(UpperCamelCase__ ) > 1: raise ValueError('{doc_list} has two \'overview\' docs which is not allowed.' ) overview_doc.extend(UpperCamelCase__ ) # Sort return overview_doc def lowerCamelCase__ ( UpperCamelCase__ : Dict=False ) -> Optional[int]: '''simple docstring''' with open(UpperCamelCase__ , encoding='utf-8' ) as f: _snake_case = yaml.safe_load(f.read() ) # Get to the API doc _snake_case = 0 while content[api_idx]["title"] != "API": api_idx += 1 _snake_case = content[api_idx]['sections'] # Then to the model doc _snake_case = 0 while api_doc[scheduler_idx]["title"] != "Schedulers": scheduler_idx += 1 _snake_case = api_doc[scheduler_idx]['sections'] _snake_case = clean_doc_toc(UpperCamelCase__ ) _snake_case = False if new_scheduler_doc != scheduler_doc: _snake_case = True if overwrite: _snake_case = new_scheduler_doc if diff: if overwrite: _snake_case = api_doc with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as f: f.write(yaml.dump(UpperCamelCase__ , allow_unicode=UpperCamelCase__ ) ) else: raise ValueError( 'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' 
) def lowerCamelCase__ ( UpperCamelCase__ : Tuple=False ) -> List[Any]: '''simple docstring''' with open(UpperCamelCase__ , encoding='utf-8' ) as f: _snake_case = yaml.safe_load(f.read() ) # Get to the API doc _snake_case = 0 while content[api_idx]["title"] != "API": api_idx += 1 _snake_case = content[api_idx]['sections'] # Then to the model doc _snake_case = 0 while api_doc[pipeline_idx]["title"] != "Pipelines": pipeline_idx += 1 _snake_case = False _snake_case = api_doc[pipeline_idx]['sections'] _snake_case = [] # sort sub pipeline docs for pipeline_doc in pipeline_docs: if "section" in pipeline_doc: _snake_case = pipeline_doc['section'] _snake_case = clean_doc_toc(UpperCamelCase__ ) if overwrite: _snake_case = new_sub_pipeline_doc new_pipeline_docs.append(UpperCamelCase__ ) # sort overall pipeline doc _snake_case = clean_doc_toc(UpperCamelCase__ ) if new_pipeline_docs != pipeline_docs: _snake_case = True if overwrite: _snake_case = new_pipeline_docs if diff: if overwrite: _snake_case = api_doc with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as f: f.write(yaml.dump(UpperCamelCase__ , allow_unicode=UpperCamelCase__ ) ) else: raise ValueError( 'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") UpperCAmelCase_ = parser.parse_args() check_scheduler_doc(args.fix_and_overwrite) check_pipeline_doc(args.fix_and_overwrite)
541
1
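Every fast test in the pipeline class above uses the same slice-comparison idiom: run the pipeline on tiny dummy components, crop the bottom-right 3x3 patch of the last channel of the first image, and compare it to a hard-coded reference within a tolerance. A minimal, self-contained sketch of that check, with a random array standing in for the pipeline output (shapes and the 1e-3 tolerance mirror the tests; everything else is illustrative):

import numpy as np

def assert_slice_close(image, expected_slice, atol=1e-3):
    # Bottom-right 3x3 patch of the last channel of the first image,
    # matching image[0, -3:, -3:, -1] in the tests above.
    image_slice = image[0, -3:, -3:, -1]
    max_diff = np.abs(image_slice.flatten() - expected_slice).max()
    assert max_diff < atol, f"max diff {max_diff} exceeds tolerance {atol}"

rng = np.random.default_rng(0)
fake_output = rng.random((1, 32, 32, 3)).astype(np.float32)  # stand-in for pipe(...).images
assert_slice_close(fake_output, fake_output[0, -3:, -3:, -1].flatten())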
import argparse from pathlib import Path import torch from transformers import OPTConfig, OPTModel from transformers.utils import logging logging.set_verbosity_info() a_ = logging.get_logger(__name__) def __lowerCAmelCase ( A_ : str ) -> Any: __UpperCAmelCase = torch.load(A_ , map_location="cpu" ) if "model" in sd.keys(): __UpperCAmelCase = torch.load(A_ , map_location="cpu" )["model"] # pop unnecessary weights __UpperCAmelCase = [ "decoder.version", "decoder.output_projection.weight", ] for key in keys_to_delete: if key in sd: sd.pop(A_ ) __UpperCAmelCase = { "decoder.project_in_dim.weight": "decoder.project_in.weight", "decoder.project_out_dim.weight": "decoder.project_out.weight", "decoder.layer_norm.weight": "decoder.final_layer_norm.weight", "decoder.layer_norm.bias": "decoder.final_layer_norm.bias", } for old_key, new_key in keys_to_rename.items(): if old_key in sd: __UpperCAmelCase = sd.pop(A_ ) __UpperCAmelCase = list(sd.keys() ) for key in keys: if ".qkv_proj." in key: __UpperCAmelCase = sd[key] # We split QKV in separate Q,K,V __UpperCAmelCase = key.replace(".qkv_proj." , ".q_proj." ) __UpperCAmelCase = key.replace(".qkv_proj." , ".k_proj." ) __UpperCAmelCase = key.replace(".qkv_proj." , ".v_proj." ) __UpperCAmelCase = value.shape[0] assert depth % 3 == 0 # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming: # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97 __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = torch.split(A_ , depth // 3 , dim=0 ) __UpperCAmelCase = q __UpperCAmelCase = k __UpperCAmelCase = v del sd[key] return sd @torch.no_grad() def __lowerCAmelCase ( A_ : Union[str, Any] , A_ : Tuple , A_ : Optional[Any]=None ) -> Optional[int]: __UpperCAmelCase = load_checkpoint(A_ ) if config is not None: __UpperCAmelCase = OPTConfig.from_pretrained(A_ ) else: __UpperCAmelCase = OPTConfig() __UpperCAmelCase = OPTModel(A_ ).half().eval() model.load_state_dict(A_ ) # Check results Path(A_ ).mkdir(exist_ok=A_ ) model.save_pretrained(A_ ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--fairseq_path""", type=str, help=( """path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:""" """ https://huggingface.co/models?other=opt_metasq""" ), ) parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""") a_ = parser.parse_args() convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
221
import os from argparse import ArgumentParser from typing import List import torch.utils.data from datasets import Dataset, IterableDataset from datasets.distributed import split_dataset_by_node a_ = 4 a_ = 3 class UpperCAmelCase__ ( snake_case ): """simple docstring""" pass def __lowerCAmelCase ( A_ : List[str] ) -> List[Any]: for shard in shards: for i in range(A_ ): yield {"i": i, "shard": shard} def __lowerCAmelCase ( ) -> List[str]: __UpperCAmelCase = int(os.environ["RANK"] ) __UpperCAmelCase = int(os.environ["WORLD_SIZE"] ) __UpperCAmelCase = ArgumentParser() parser.add_argument("--streaming" , type=A_ ) parser.add_argument("--local_rank" , type=A_ ) parser.add_argument("--num_workers" , type=A_ , default=0 ) __UpperCAmelCase = parser.parse_args() __UpperCAmelCase = args.streaming __UpperCAmelCase = args.num_workers __UpperCAmelCase = {"shards": [F'''shard_{shard_idx}''' for shard_idx in range(A_ )]} __UpperCAmelCase = IterableDataset.from_generator(A_ , gen_kwargs=A_ ) if not streaming: __UpperCAmelCase = Dataset.from_list(list(A_ ) ) __UpperCAmelCase = split_dataset_by_node(A_ , rank=A_ , world_size=A_ ) __UpperCAmelCase = torch.utils.data.DataLoader(A_ , num_workers=A_ ) __UpperCAmelCase = NUM_SHARDS * NUM_ITEMS_PER_SHARD __UpperCAmelCase = full_size // world_size expected_local_size += int(rank < (full_size % world_size) ) __UpperCAmelCase = sum(1 for _ in dataloader ) if local_size != expected_local_size: raise FailedTestError(F'''local_size {local_size} != expected_local_size {expected_local_size}''' ) if __name__ == "__main__": main()
221
1
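The key step in the checkpoint converter above is un-fusing the combined QKV projection: the fused weight is split into three equal chunks along dim 0 with torch.split. A minimal sketch under an assumed (3 * hidden, hidden) layout; note the converter's own comment warns that metaseq checkpoints may actually stack the chunks as K, V, Q rather than Q, K, V:

import torch

hidden = 8  # illustrative size, not the real OPT hidden dim
fused = torch.randn(3 * hidden, hidden)  # stand-in for a *.qkv_proj.weight entry
assert fused.shape[0] % 3 == 0
q, k, v = torch.split(fused, fused.shape[0] // 3, dim=0)
assert q.shape == k.shape == v.shape == (hidden, hidden)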
"""simple docstring""" import argparse import datetime def _lowerCAmelCase ( _UpperCamelCase ): """simple docstring""" _lowercase: Optional[int] = { '''0''': '''Sunday''', '''1''': '''Monday''', '''2''': '''Tuesday''', '''3''': '''Wednesday''', '''4''': '''Thursday''', '''5''': '''Friday''', '''6''': '''Saturday''', } _lowercase: Dict = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0} # Validate if not 0 < len(_UpperCamelCase ) < 11: raise ValueError('''Must be 10 characters long''' ) # Get month _lowercase: int = int(date_input[0] + date_input[1] ) # Validate if not 0 < m < 13: raise ValueError('''Month must be between 1 - 12''' ) _lowercase: str = date_input[2] # Validate if sep_a not in ["-", "/"]: raise ValueError('''Date separator must be \'-\' or \'/\'''' ) # Get day _lowercase: int = int(date_input[3] + date_input[4] ) # Validate if not 0 < d < 32: raise ValueError('''Date must be between 1 - 31''' ) # Get second separator _lowercase: str = date_input[5] # Validate if sep_a not in ["-", "/"]: raise ValueError('''Date separator must be \'-\' or \'/\'''' ) # Get year _lowercase: int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] ) # Arbitrary year range if not 45 < y < 8_500: raise ValueError( '''Year out of range. There has to be some sort of limit...right?''' ) # Get datetime obj for validation _lowercase: Optional[Any] = datetime.date(int(_UpperCamelCase ) , int(_UpperCamelCase ) , int(_UpperCamelCase ) ) # Start math if m <= 2: _lowercase: int = y - 1 _lowercase: Union[str, Any] = m + 12 # maths var _lowercase: int = int(str(_UpperCamelCase )[:2] ) _lowercase: int = int(str(_UpperCamelCase )[2:] ) _lowercase: int = int(2.6 * m - 5.39 ) _lowercase: int = int(c / 4 ) _lowercase: int = int(k / 4 ) _lowercase: int = int(d + k ) _lowercase: int = int(t + u + v + x ) _lowercase: int = int(z - (2 * c) ) _lowercase: int = round(w % 7 ) # End math # Validate math if f != convert_datetime_days[dt_ck.weekday()]: raise AssertionError('''The date was evaluated incorrectly. Contact developer.''' ) # Response _lowercase: str = f'''Your date {date_input}, is a {days[str(_UpperCamelCase )]}!''' return response if __name__ == "__main__": import doctest doctest.testmod() A__ : Tuple = argparse.ArgumentParser( description=( 'Find out what day of the week nearly any date is or was. Enter ' 'date as a string in the mm-dd-yyyy or mm/dd/yyyy format' ) ) parser.add_argument( 'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)' ) A__ : Optional[Any] = parser.parse_args() zeller(args.date_input)
707
"""simple docstring""" import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCasePlus, slow from utils import ROUGE_KEYS logging.basicConfig(level=logging.DEBUG) A__ : Any = logging.getLogger() def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ): """simple docstring""" _lowercase: Optional[int] = '''\n'''.join(_UpperCamelCase ) Path(_UpperCamelCase ).open('''w''' ).writelines(_UpperCamelCase ) A__ : int = 'patrickvonplaten/t5-tiny-random' A__ : Tuple = 'sshleifer/bart-tiny-random' A__ : Optional[Any] = 'sshleifer/tiny-mbart' A__ : Optional[int] = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks class __magic_name__ ( SCREAMING_SNAKE_CASE__ ): def lowercase_ ( self , A_ ) -> List[Any]: """simple docstring""" _lowercase: str = Path(self.get_auto_remove_tmp_dir() ) / '''utest_input.source''' _lowercase: Tuple = input_file_name.parent / '''utest_output.txt''' assert not output_file_name.exists() _lowercase: List[str] = [''' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.'''] _dump_articles(A_ , A_ ) _lowercase: int = str(Path(self.get_auto_remove_tmp_dir() ) / '''scores.json''' ) _lowercase: Optional[Any] = '''translation_en_to_de''' if model == T5_TINY else '''summarization''' _lowercase: str = f''' run_eval_search.py {model} {input_file_name} {output_file_name} --score_path {score_path} --task {task} --num_beams 2 --length_penalty 2.0 '''.split() with patch.object(A_ , '''argv''' , A_ ): run_generate() assert Path(A_ ).exists() # os.remove(Path(output_file_name)) def lowercase_ ( self ) -> Any: """simple docstring""" self.run_eval_tester(A_ ) @parameterized.expand([BART_TINY, MBART_TINY] ) @slow def lowercase_ ( self , A_ ) -> Union[str, Any]: """simple docstring""" self.run_eval_tester(A_ ) @parameterized.expand([T5_TINY, MBART_TINY] ) @slow def lowercase_ ( self , A_ ) -> List[Any]: """simple docstring""" _lowercase: Tuple = Path(self.get_auto_remove_tmp_dir() ) / '''utest_input.source''' _lowercase: Dict = input_file_name.parent / '''utest_output.txt''' assert not output_file_name.exists() _lowercase: int = { '''en''': ['''Machine learning is great, isn\'t it?''', '''I like to eat bananas''', '''Tomorrow is another great day!'''], '''de''': [ '''Maschinelles Lernen ist großartig, oder?''', '''Ich esse gerne Bananen''', '''Morgen ist wieder ein toller Tag!''', ], } _lowercase: Tuple = Path(self.get_auto_remove_tmp_dir() ) _lowercase: Tuple = str(tmp_dir / '''scores.json''' ) _lowercase: List[str] = str(tmp_dir / '''val.target''' ) _dump_articles(A_ , text['''en'''] ) _dump_articles(A_ , text['''de'''] ) _lowercase: Tuple = '''translation_en_to_de''' if model == T5_TINY else '''summarization''' _lowercase: Tuple = f''' run_eval_search.py {model} {str(A_ )} {str(A_ )} --score_path {score_path} --reference_path {reference_path} --task {task} '''.split() testargs.extend(['''--search''', '''num_beams=1:2 length_penalty=0.9:1.0'''] ) with patch.object(A_ , '''argv''' , A_ ): with CaptureStdout() as cs: run_search() _lowercase: List[str] = [''' num_beams | length_penalty''', model, '''Best score args'''] _lowercase: Union[str, Any] = ['''Info'''] if "translation" in task: expected_strings.append('''bleu''' ) else: expected_strings.extend(A_ ) 
for w in expected_strings: assert w in cs.out for w in un_expected_strings: assert w not in cs.out assert Path(A_ ).exists() os.remove(Path(A_ ) )
272
0
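The weekday script above re-derives the day of the week with Zeller-style arithmetic and validates itself against datetime. As a stdlib-only reference point, the same answer comes directly from date.weekday(); the helper name and sample date here are illustrative:

import datetime

def weekday_name(month, day, year):
    # date.weekday() returns Monday=0 .. Sunday=6
    names = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
    return names[datetime.date(year, month, day).weekday()]

assert weekday_name(1, 1, 2000) == "Saturday"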
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
382
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_clap": [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapAudioConfig",
        "ClapConfig",
        "ClapTextConfig",
    ],
    "processing_clap": ["ClapProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clap"] = [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapModel",
        "ClapPreTrainedModel",
        "ClapTextModel",
        "ClapTextModelWithProjection",
        "ClapAudioModel",
        "ClapAudioModelWithProjection",
    ]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]

if TYPE_CHECKING:
    from .configuration_clap import (
        CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
        ClapAudioConfig,
        ClapConfig,
        ClapTextConfig,
    )
    from .processing_clap import ClapProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clap import ClapFeatureExtractor
        from .modeling_clap import (
            CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ClapAudioModel,
            ClapAudioModelWithProjection,
            ClapModel,
            ClapPreTrainedModel,
            ClapTextModel,
            ClapTextModelWithProjection,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
382
1
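Both package __init__ files above route imports through _LazyModule so that heavy submodules load only on first attribute access. A generic, stdlib-only sketch of the underlying idea using PEP 562 module-level __getattr__ (this is not the transformers implementation; the stdlib modules in the table are placeholders):

import importlib

# submodule -> names it exports; stdlib modules used purely for demonstration
_import_structure = {"json": ["loads", "dumps"], "math": ["sqrt"]}

def __getattr__(name):
    # PEP 562: invoked for attributes not found on this module;
    # the real import happens only here, on first access.
    for module_name, exported in _import_structure.items():
        if name in exported:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

if __name__ == "__main__":
    print(__getattr__("sqrt")(16))  # 4.0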
"""simple docstring""" import logging import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEncoder, BertModel, BertPreTrainedModel, ) UpperCamelCase__ = logging.getLogger(__name__) class a ( lowercase ): def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None ): UpperCAmelCase__ : List[Any] = self.layer[current_layer](UpperCamelCase_ , UpperCamelCase_ , head_mask[current_layer] ) UpperCAmelCase__ : str = layer_outputs[0] return hidden_states @add_start_docstrings( """The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.""" , lowercase , ) class a ( lowercase ): def __init__( self , UpperCamelCase_ ): super().__init__(UpperCamelCase_ ) UpperCAmelCase__ : Union[str, Any] = BertEncoderWithPabee(UpperCamelCase_ ) self.init_weights() UpperCAmelCase__ : Optional[Any] = 0 UpperCAmelCase__ : str = 0 UpperCAmelCase__ : List[str] = 0 UpperCAmelCase__ : List[Any] = 0 def __snake_case ( self , UpperCamelCase_ ): UpperCAmelCase__ : List[Any] = threshold def __snake_case ( self , UpperCamelCase_ ): UpperCAmelCase__ : Optional[Any] = patience def __snake_case ( self ): UpperCAmelCase__ : List[Any] = 0 UpperCAmelCase__ : Union[str, Any] = 0 def __snake_case ( self ): UpperCAmelCase__ : str = self.inference_layers_num / self.inference_instances_num UpperCAmelCase__ : int = ( F'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up =''' F''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***''' ) print(UpperCamelCase_ ) @add_start_docstrings_to_model_forward(UpperCamelCase_ ) def __snake_case ( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=False , ): if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' ) elif input_ids is not None: UpperCAmelCase__ : Any = input_ids.size() elif inputs_embeds is not None: UpperCAmelCase__ : Tuple = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds' ) UpperCAmelCase__ : Any = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: UpperCAmelCase__ : Union[str, Any] = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ ) if token_type_ids is None: UpperCAmelCase__ : Any = torch.zeros(UpperCamelCase_ , dtype=torch.long , device=UpperCamelCase_ ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
UpperCAmelCase__ : torch.Tensor = self.get_extended_attention_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = encoder_hidden_states.size() UpperCAmelCase__ : Optional[Any] = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: UpperCAmelCase__ : Dict = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ ) UpperCAmelCase__ : str = self.invert_attention_mask(UpperCamelCase_ ) else: UpperCAmelCase__ : int = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] UpperCAmelCase__ : str = self.get_head_mask(UpperCamelCase_ , self.config.num_hidden_layers ) UpperCAmelCase__ : Tuple = self.embeddings( input_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ ) UpperCAmelCase__ : int = embedding_output if self.training: UpperCAmelCase__ : int = [] for i in range(self.config.num_hidden_layers ): UpperCAmelCase__ : Tuple = self.encoder.adaptive_forward( UpperCamelCase_ , current_layer=UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ ) UpperCAmelCase__ : Tuple = self.pooler(UpperCamelCase_ ) UpperCAmelCase__ : Dict = output_layers[i](output_dropout(UpperCamelCase_ ) ) res.append(UpperCamelCase_ ) elif self.patience == 0: # Use all layers for inference UpperCAmelCase__ : Dict = self.encoder( UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , ) UpperCAmelCase__ : Optional[int] = self.pooler(encoder_outputs[0] ) UpperCAmelCase__ : str = [output_layers[self.config.num_hidden_layers - 1](UpperCamelCase_ )] else: UpperCAmelCase__ : str = 0 UpperCAmelCase__ : Union[str, Any] = None UpperCAmelCase__ : int = 0 for i in range(self.config.num_hidden_layers ): calculated_layer_num += 1 UpperCAmelCase__ : Optional[int] = self.encoder.adaptive_forward( UpperCamelCase_ , current_layer=UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ ) UpperCAmelCase__ : Optional[int] = self.pooler(UpperCamelCase_ ) UpperCAmelCase__ : Optional[int] = output_layers[i](UpperCamelCase_ ) if regression: UpperCAmelCase__ : Dict = logits.detach() if patient_result is not None: UpperCAmelCase__ : Tuple = patient_result.detach() if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold: patient_counter += 1 else: UpperCAmelCase__ : Tuple = 0 else: UpperCAmelCase__ : int = logits.detach().argmax(dim=1 ) if patient_result is not None: UpperCAmelCase__ : List[Any] = patient_result.detach().argmax(dim=1 ) if (patient_result is not None) and torch.all(labels.eq(UpperCamelCase_ ) ): patient_counter += 1 else: UpperCAmelCase__ : Optional[Any] = 0 UpperCAmelCase__ : Tuple = logits if patient_counter == self.patience: break UpperCAmelCase__ : Optional[int] = [patient_result] self.inference_layers_num += calculated_layer_num self.inference_instances_num += 1 return res @add_start_docstrings( """Bert Model transformer with 
PABEE and a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """ , lowercase , ) class a ( lowercase ): def __init__( self , UpperCamelCase_ ): super().__init__(UpperCamelCase_ ) UpperCAmelCase__ : Tuple = config.num_labels UpperCAmelCase__ : Tuple = BertModelWithPabee(UpperCamelCase_ ) UpperCAmelCase__ : Tuple = nn.Dropout(config.hidden_dropout_prob ) UpperCAmelCase__ : Optional[Any] = nn.ModuleList( [nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] ) self.init_weights() @add_start_docstrings_to_model_forward(UpperCamelCase_ ) def __snake_case ( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , ): UpperCAmelCase__ : Dict = self.bert( input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , ) UpperCAmelCase__ : List[str] = (logits[-1],) if labels is not None: UpperCAmelCase__ : Optional[Any] = None UpperCAmelCase__ : Optional[int] = 0 for ix, logits_item in enumerate(UpperCamelCase_ ): if self.num_labels == 1: # We are doing regression UpperCAmelCase__ : Tuple = MSELoss() UpperCAmelCase__ : Optional[Any] = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) ) else: UpperCAmelCase__ : str = CrossEntropyLoss() UpperCAmelCase__ : Tuple = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) ) if total_loss is None: UpperCAmelCase__ : str = loss else: total_loss += loss * (ix + 1) total_weights += ix + 1 UpperCAmelCase__ : Dict = (total_loss / total_weights,) + outputs return outputs
254
"""simple docstring""" from __future__ import annotations from collections import deque from collections.abc import Iterator from dataclasses import dataclass @dataclass class a : UpperCamelCase : int UpperCamelCase : int class a : def __init__( self , UpperCamelCase_ ): UpperCAmelCase__ : list[list[Edge]] = [[] for _ in range(UpperCamelCase_ )] UpperCAmelCase__ : Union[str, Any] = size def __getitem__( self , UpperCamelCase_ ): return iter(self._graph[vertex] ) @property def __snake_case ( self ): return self._size def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): if weight not in (0, 1): raise ValueError('Edge weight must be either 0 or 1.' ) if to_vertex < 0 or to_vertex >= self.size: raise ValueError('Vertex indexes must be in [0; size).' ) self._graph[from_vertex].append(Edge(UpperCamelCase_ , UpperCamelCase_ ) ) def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ): UpperCAmelCase__ : List[str] = deque([start_vertex] ) UpperCAmelCase__ : list[int | None] = [None] * self.size UpperCAmelCase__ : List[str] = 0 while queue: UpperCAmelCase__ : Dict = queue.popleft() UpperCAmelCase__ : Optional[Any] = distances[current_vertex] if current_distance is None: continue for edge in self[current_vertex]: UpperCAmelCase__ : Optional[int] = current_distance + edge.weight UpperCAmelCase__ : Optional[Any] = distances[edge.destination_vertex] if ( isinstance(UpperCamelCase_ , UpperCamelCase_ ) and new_distance >= dest_vertex_distance ): continue UpperCAmelCase__ : Dict = new_distance if edge.weight == 0: queue.appendleft(edge.destination_vertex ) else: queue.append(edge.destination_vertex ) if distances[finish_vertex] is None: raise ValueError('No path from start_vertex to finish_vertex.' ) return distances[finish_vertex] if __name__ == "__main__": import doctest doctest.testmod()
254
1
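The shortest-path method in the Edge/graph classes above is a 0/1-BFS: a plain BFS over a deque where zero-weight edges push to the front and unit-weight edges to the back, giving shortest paths in O(V + E). A compact runnable sketch of the technique (the adjacency list and tiny test graph are made up):

from collections import deque

def zero_one_bfs(adj: list[list[tuple[int, int]]], start: int, goal: int) -> int:
    # adj[u] is a list of (neighbor, weight) pairs with weight 0 or 1.
    INF = float("inf")
    dist = [INF] * len(adj)
    dist[start] = 0
    queue = deque([start])
    while queue:
        u = queue.popleft()
        for v, w in adj[u]:
            if dist[u] + w < dist[v]:
                dist[v] = dist[u] + w
                # weight-0 edges keep the deque sorted by distance
                (queue.appendleft if w == 0 else queue.append)(v)
    return dist[goal]

adj = [[(1, 0), (2, 1)], [(2, 1)], []]
assert zero_one_bfs(adj, 0, 2) == 1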
from functools import reduce

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)

def solution(n: str = N) -> int:
    # Largest product of 13 adjacent digits; reduce multiplies each window.
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )

if __name__ == "__main__":
    print(f"{solution() = }")
686
"""simple docstring""" def lowercase ( UpperCamelCase : int ): """simple docstring""" if num <= 0: raise ValueError("Input must be a positive integer" ) A__ : Union[str, Any] =[True] * (num + 1) A__ : Union[str, Any] =2 while p * p <= num: if primes[p]: for i in range(p * p , num + 1 , UpperCamelCase ): A__ : str =False p += 1 return [prime for prime in range(2 , num + 1 ) if primes[prime]] if __name__ == "__main__": import doctest doctest.testmod() __A : Optional[int] = int(input("Enter a positive integer: ").strip()) print(prime_sieve_eratosthenes(user_num))
656
0
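The reduce()-based solution above scans every window of 13 adjacent digits and keeps the largest product. The same idea written as a plain loop, shown on a short digit string with a window of 4 so the expected value is easy to verify by hand:

def largest_window_product(digits: str, window: int) -> int:
    best = 0
    for i in range(len(digits) - window + 1):
        product = 1
        for ch in digits[i : i + window]:
            product *= int(ch)
        best = max(best, product)
    return best

# 7*6*5*3 = 630 is the largest 4-digit window product in this prefix of N
assert largest_window_product("7316717653", 4) == 630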
"""simple docstring""" import argparse import torch from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def lowercase__(A , A , A ) ->str: """simple docstring""" if openai_config_file == "": lowercase__ : Any= OpenAIGPTConfig() else: lowercase__ : List[Any]= OpenAIGPTConfig.from_json_file(A ) lowercase__ : Optional[int]= OpenAIGPTModel(A ) # Load weights from numpy load_tf_weights_in_openai_gpt(A , A , A ) # Save pytorch-model lowercase__ : Union[str, Any]= pytorch_dump_folder_path + "/" + WEIGHTS_NAME lowercase__ : Any= pytorch_dump_folder_path + "/" + CONFIG_NAME print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' ) torch.save(model.state_dict() , A ) print(f'''Save configuration file to {pytorch_config_dump_path}''' ) with open(A , "w" , encoding="utf-8" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": a : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--openai_checkpoint_folder_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--openai_config_file""", default="""""", type=str, help=( """An optional config json file corresponding to the pre-trained OpenAI model. \n""" """This specifies the model architecture.""" ), ) a : Union[str, Any] = parser.parse_args() convert_openai_checkpoint_to_pytorch( args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path )
85
"""simple docstring""" from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
85
1
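Both conversion scripts in this section end the same way: build the target model, load the remapped weights, and write a weights file plus a JSON config into a dump folder. A generic sketch of that save step with a toy nn.Linear standing in for the converted model; the folder path, file names, and config keys here are illustrative only:

import json
import os

import torch
from torch import nn

model = nn.Linear(4, 2)  # stand-in for the converted model
dump_dir = "/tmp/converted_model"  # illustrative dump folder
os.makedirs(dump_dir, exist_ok=True)
torch.save(model.state_dict(), os.path.join(dump_dir, "pytorch_model.bin"))
with open(os.path.join(dump_dir, "config.json"), "w", encoding="utf-8") as f:
    json.dump({"in_features": 4, "out_features": 2}, f)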
import json import os from typing import Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __A : Any = logging.get_logger(__name__) __A : Optional[Any] = { 'vocab_file': 'vocab.json', 'tokenizer_config_file': 'tokenizer_config.json', 'merges_file': 'merges.txt', } __A : Optional[int] = { 'vocab_file': { 'facebook/s2t-wav2vec2-large-en-de': ( 'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json' ), }, 'tokenizer_config_file': { 'facebook/s2t-wav2vec2-large-en-de': ( 'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json' ), }, 'merges_file': { 'facebook/s2t-wav2vec2-large-en-de': ( 'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt' ), }, } __A : Optional[int] = '</w>' __A : List[Any] = '@@ ' def __a ( A__ : Optional[int] ): SCREAMING_SNAKE_CASE = set() SCREAMING_SNAKE_CASE = word[0] for char in word[1:]: pairs.add((prev_char, char) ) SCREAMING_SNAKE_CASE = char return pairs # Speech2Text2 has no max input length __A : Any = {'facebook/s2t-wav2vec2-large-en-de': 1_0_2_4} class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = ["input_ids", "attention_mask"] def __init__( self : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[int]="<s>" , __lowerCamelCase : List[Any]="<pad>" , __lowerCamelCase : Optional[int]="</s>" , __lowerCamelCase : Tuple="<unk>" , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : List[str]=None , **__lowerCamelCase : Optional[Any] , ): super().__init__( unk_token=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , pad_token=__lowerCamelCase , do_lower_case=__lowerCamelCase , **__lowerCamelCase , ) SCREAMING_SNAKE_CASE = do_lower_case with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle: SCREAMING_SNAKE_CASE = json.load(__lowerCamelCase ) SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()} if merges_file is None: logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding." 
) SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None else: with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle: SCREAMING_SNAKE_CASE = merges_handle.read().split("\n" )[:-1] SCREAMING_SNAKE_CASE = [tuple(merge.split()[:2] ) for merge in merges] SCREAMING_SNAKE_CASE = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) SCREAMING_SNAKE_CASE = {} @property def _snake_case ( self : str ): return len(self.decoder ) def _snake_case ( self : Optional[Any] ): return dict(self.encoder , **self.added_tokens_encoder ) def _snake_case ( self : List[Any] , __lowerCamelCase : Optional[int] ): SCREAMING_SNAKE_CASE = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,) if token in self.cache: return self.cache[token] SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase ) if not pairs: return token while True: SCREAMING_SNAKE_CASE = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = bigram SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = 0 while i < len(__lowerCamelCase ): try: SCREAMING_SNAKE_CASE = word.index(__lowerCamelCase , __lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) SCREAMING_SNAKE_CASE = j if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase ) SCREAMING_SNAKE_CASE = new_word if len(__lowerCamelCase ) == 1: break else: SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase ) SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase ) if word == "\n " + BPE_TOKEN_MERGES: SCREAMING_SNAKE_CASE = "\n" + BPE_TOKEN_MERGES if word.endswith(__lowerCamelCase ): SCREAMING_SNAKE_CASE = word.replace(__lowerCamelCase , "" ) SCREAMING_SNAKE_CASE = word.replace(" " , __lowerCamelCase ) SCREAMING_SNAKE_CASE = word return word def _snake_case ( self : Union[str, Any] , __lowerCamelCase : Any ): if self.bpe_ranks is None: raise ValueError( "This tokenizer was instantiated without a `merges.txt` file, so" " that it can only be used for decoding, not for encoding." "Make sure to provide `merges.txt` file at instantiation to enable " "encoding." 
) if self.do_lower_case: SCREAMING_SNAKE_CASE = text.lower() SCREAMING_SNAKE_CASE = text.split() SCREAMING_SNAKE_CASE = [] for token in text: if token: split_tokens.extend(list(self.bpe(__lowerCamelCase ).split(" " ) ) ) return split_tokens def _snake_case ( self : Union[str, Any] , __lowerCamelCase : str ): return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) ) def _snake_case ( self : List[Any] , __lowerCamelCase : int ): SCREAMING_SNAKE_CASE = self.decoder.get(__lowerCamelCase , self.unk_token ) return result def _snake_case ( self : List[str] , __lowerCamelCase : List[str] ): SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase ) # make sure @@ tokens are concatenated SCREAMING_SNAKE_CASE = "".join(string.split(__lowerCamelCase ) ) return string def _snake_case ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): if not os.path.isdir(__lowerCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return SCREAMING_SNAKE_CASE = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) SCREAMING_SNAKE_CASE = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" ) SCREAMING_SNAKE_CASE = 0 if self.bpe_ranks is None: return (vocab_file,) with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" ) SCREAMING_SNAKE_CASE = token_index writer.write(" ".join(__lowerCamelCase ) + "\n" ) index += 1 return (vocab_file, merges_file)
16
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
16
1
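The tokenizer above implements classic BPE: repeatedly find the adjacent symbol pair with the best merge rank and fuse it, until no known merge applies. A toy version of that loop with a made-up two-rule merge table (the real ranks come from merges.txt):

def bpe(word: tuple, bpe_ranks: dict) -> tuple:
    while True:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        candidates = [p for p in pairs if p in bpe_ranks]
        if not candidates:
            return word
        first, second = min(candidates, key=bpe_ranks.get)  # best-ranked merge
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)

ranks = {("l", "o"): 0, ("lo", "w"): 1}
assert bpe(("l", "o", "w"), ranks) == ("low",)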
"""simple docstring""" import argparse from copy import deepcopy import numpy as np from datasets import ClassLabel, DatasetDict, load_dataset from evaluate import load from transformers import ( AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, Trainer, TrainerCallback, TrainingArguments, set_seed, ) def __snake_case ( ) -> str: """simple docstring""" A = argparse.ArgumentParser() parser.add_argument('--model_ckpt' , type=UpperCamelCase__ , default='microsoft/unixcoder-base-nine' ) parser.add_argument('--num_epochs' , type=UpperCamelCase__ , default=5 ) parser.add_argument('--batch_size' , type=UpperCamelCase__ , default=6 ) parser.add_argument('--gradient_accumulation_steps' , type=UpperCamelCase__ , default=1 ) parser.add_argument('--freeze' , type=UpperCamelCase__ , default=UpperCamelCase__ ) parser.add_argument('--learning_rate' , type=UpperCamelCase__ , default=5E-4 ) parser.add_argument('--seed' , type=UpperCamelCase__ , default=0 ) parser.add_argument('--lr_scheduler_type' , type=UpperCamelCase__ , default='cosine' ) parser.add_argument('--num_warmup_steps' , type=UpperCamelCase__ , default=10 ) parser.add_argument('--weight_decay' , type=UpperCamelCase__ , default=0.0_1 ) parser.add_argument('--output_dir' , type=UpperCamelCase__ , default='./results' ) return parser.parse_args() UpperCamelCase : Dict = load("accuracy") def __snake_case ( UpperCamelCase__ ) -> int: """simple docstring""" A , A = eval_pred A = np.argmax(UpperCamelCase__ , axis=1 ) return metric.compute(predictions=UpperCamelCase__ , references=UpperCamelCase__ ) class lowerCamelCase__ ( UpperCAmelCase_ ): def __init__( self : Tuple , _lowercase : Union[str, Any] ): super().__init__() A = trainer def __a ( self : List[Any] , _lowercase : Optional[int] , _lowercase : str , _lowercase : Any , **_lowercase : str ): if control.should_evaluate: A = deepcopy(_lowercase ) self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='train' ) return control_copy def __snake_case ( ) -> int: """simple docstring""" A = get_args() set_seed(args.seed ) A = load_dataset('codeparrot/codecomplex' , split='train' ) A = dataset.train_test_split(test_size=0.2 ) A = train_test['test'].train_test_split(test_size=0.5 ) A = DatasetDict( { 'train': train_test['train'], 'test': test_validation['train'], 'valid': test_validation['test'], } ) print('Loading tokenizer and model' ) A = AutoTokenizer.from_pretrained(args.model_ckpt ) A = tokenizer.eos_token A = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 ) A = model.config.eos_token_id if args.freeze: for param in model.roberta.parameters(): A = False A = ClassLabel(num_classes=7 , names=list(set(train_test_validation['train']['complexity'] ) ) ) def tokenize(UpperCamelCase__ ): A = tokenizer(example['src'] , truncation=UpperCamelCase__ , max_length=1024 ) A = labels.straint(example['complexity'] ) return { "input_ids": inputs["input_ids"], "attention_mask": inputs["attention_mask"], "label": label, } A = train_test_validation.map( UpperCamelCase__ , batched=UpperCamelCase__ , remove_columns=train_test_validation['train'].column_names , ) A = DataCollatorWithPadding(tokenizer=UpperCamelCase__ ) A = TrainingArguments( output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='epoch' , save_strategy='epoch' , logging_strategy='epoch' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , 
num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.0_1 , metric_for_best_model='accuracy' , run_name='complexity-java' , report_to='wandb' , ) A = Trainer( model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=tokenized_datasets['train'] , eval_dataset=tokenized_datasets['valid'] , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , ) print('Training...' ) trainer.add_callback(CustomCallback(UpperCamelCase__ ) ) trainer.train() if __name__ == "__main__": main()
715
"""simple docstring""" from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf UpperCamelCase : Optional[Any] = logging.get_logger(__name__) @dataclass class lowerCamelCase__ ( UpperCAmelCase_ ): lowerCAmelCase = [ """no_inference""", """no_cuda""", """no_tpu""", """no_speed""", """no_memory""", """no_env_print""", """no_multi_process""", ] def __init__( self : str , **_lowercase : Union[str, Any] ): for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: A = deprecated_arg[3:] A = not kwargs.pop(_lowercase ) logger.warning( f'{deprecated_arg} is depreciated. Please use --no-{positive_arg} or' f' {positive_arg}={kwargs[positive_arg]}' ) A = kwargs.pop('tpu_name' , self.tpu_name ) A = kwargs.pop('device_idx' , self.device_idx ) A = kwargs.pop('eager_mode' , self.eager_mode ) A = kwargs.pop('use_xla' , self.use_xla ) super().__init__(**_lowercase ) lowerCAmelCase = field( default=UpperCAmelCase_ , metadata={"""help""": """Name of TPU"""} , ) lowerCAmelCase = field( default=0 , metadata={"""help""": """CPU / GPU device index. Defaults to 0."""} , ) lowerCAmelCase = field(default=UpperCAmelCase_ , metadata={"""help""": """Benchmark models in eager model."""} ) lowerCAmelCase = field( default=UpperCAmelCase_ , metadata={ """help""": """Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.""" } , ) @cached_property def __a ( self : Optional[Any] ): requires_backends(self , ['tf'] ) A = None if self.tpu: try: if self.tpu_name: A = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: A = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: A = None return tpu @cached_property def __a ( self : Dict ): requires_backends(self , ['tf'] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) A = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , 'GPU' ) A = tf.distribute.OneDeviceStrategy(device=f'/gpu:{self.device_idx}' ) else: tf.config.set_visible_devices([] , 'GPU' ) # disable GPU A = tf.distribute.OneDeviceStrategy(device=f'/cpu:{self.device_idx}' ) return strategy @property def __a ( self : List[Any] ): requires_backends(self , ['tf'] ) return self._setup_tpu is not None @property def __a ( self : Optional[Any] ): requires_backends(self , ['tf'] ) return self._setup_strategy @property def __a ( self : str ): requires_backends(self , ['tf'] ) return tf.config.list_physical_devices('GPU' ) @property def __a ( self : Any ): requires_backends(self , ['tf'] ) if self.cuda: return len(self.gpu_list ) return 0 @property def __a ( self : Dict ): return self.n_gpu > 0
91
0
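The training script above passes a compute_metrics hook to the Trainer that argmaxes the logits and scores accuracy via evaluate.load("accuracy"). A dependency-light sketch of the same hook using plain numpy (the sample logits and labels are illustrative):

import numpy as np

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    predictions = np.argmax(logits, axis=1)
    return {"accuracy": float((predictions == labels).mean())}

logits = np.array([[0.1, 0.9], [0.8, 0.2]])
labels = np.array([1, 1])
assert compute_metrics((logits, labels)) == {"accuracy": 0.5}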
from __future__ import annotations import copy import tempfile import unittest from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tensorflow_probability, require_tf, slow, ) from ..bert.test_modeling_bert import BertModelTester if is_tf_available(): from transformers import ( TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFFunnelBaseModel, TFFunnelModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, TFTapasForQuestionAnswering, ) from transformers.models.auto.modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = "new-model" if is_tf_available(): class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = NewModelConfig @require_tf class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @slow def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = "bert-base-cased" SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) @slow def _snake_case ( self : List[str] ): SCREAMING_SNAKE_CASE = "bert-base-cased" SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = TFAutoModelForPreTraining.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) @slow def _snake_case ( self : int ): for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = TFAutoModelForCausalLM.from_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = TFAutoModelForCausalLM.from_pretrained(__lowerCamelCase , output_loading_info=__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) @slow def _snake_case ( self : Tuple ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE = 
AutoConfig.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = TFAutoModelWithLMHead.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) @slow def _snake_case ( self : int ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = TFAutoModelForMaskedLM.from_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = TFAutoModelForMaskedLM.from_pretrained(__lowerCamelCase , output_loading_info=__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) @slow def _snake_case ( self : List[str] ): for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase , output_loading_info=__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) @slow def _snake_case ( self : Optional[Any] ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = TFAutoModelForSequenceClassification.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) @slow def _snake_case ( self : Tuple ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = TFAutoModelForQuestionAnswering.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) @slow @require_tensorflow_probability def _snake_case ( self : Union[str, Any] ): for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]: SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = TFAutoModelForTableQuestionAnswering.from_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = TFAutoModelForTableQuestionAnswering.from_pretrained( __lowerCamelCase , output_loading_info=__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE = TFAutoModelWithLMHead.from_pretrained(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) self.assertEqual(model.num_parameters() , 14410 ) self.assertEqual(model.num_parameters(only_trainable=__lowerCamelCase ) , 14410 ) def _snake_case ( self : List[str] ): 
SCREAMING_SNAKE_CASE = TFAutoModelWithLMHead.from_pretrained(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) self.assertEqual(model.num_parameters() , 14410 ) self.assertEqual(model.num_parameters(only_trainable=__lowerCamelCase ) , 14410 ) def _snake_case ( self : List[Any] ): # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny" ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = copy.deepcopy(model.config ) SCREAMING_SNAKE_CASE = ["FunnelBaseModel"] SCREAMING_SNAKE_CASE = TFAutoModel.from_config(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : List[Any] ): try: AutoConfig.register("new-model" , __lowerCamelCase ) SCREAMING_SNAKE_CASE = [ TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSequenceClassification, TFAutoModelForTokenClassification, ] for auto_class in auto_classes: with self.subTest(auto_class.__name__ ): # Wrong config class will raise an error with self.assertRaises(__lowerCamelCase ): auto_class.register(__lowerCamelCase , __lowerCamelCase ) auto_class.register(__lowerCamelCase , __lowerCamelCase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__lowerCamelCase ): auto_class.register(__lowerCamelCase , __lowerCamelCase ) # Now that the config is registered, it can be used as any other config with the auto-API SCREAMING_SNAKE_CASE = BertModelTester(self ).get_config() SCREAMING_SNAKE_CASE = NewModelConfig(**tiny_config.to_dict() ) SCREAMING_SNAKE_CASE = auto_class.from_config(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE = auto_class.from_pretrained(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"] for mapping in ( TF_MODEL_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, ): if NewModelConfig in mapping._extra_content: del mapping._extra_content[NewModelConfig] def _snake_case ( self : Optional[int] ): with self.assertRaisesRegex( __lowerCamelCase , "bert-base is not a local folder and is not a valid model identifier" ): SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("bert-base" ) def _snake_case ( self : Any ): with self.assertRaisesRegex( __lowerCamelCase , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained(__lowerCamelCase , revision="aaaaaa" ) def _snake_case ( self : str ): with self.assertRaisesRegex( __lowerCamelCase , "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin" , ): SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model" ) def _snake_case ( self : Tuple 
): with self.assertRaisesRegex(__lowerCamelCase , "Use `from_pt=True` to load this model" ): SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" ) def _snake_case ( self : int ): # Make sure we have cached the model. SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" ) with RequestCounter() as counter: SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 ) # With a sharded checkpoint SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" ) with RequestCounter() as counter: SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
16
from math import isqrt


def is_prime(number: int) -> bool:
    """Trial division up to isqrt(number); sufficient for the candidates below 10**6."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below max_prime that are differences of consecutive cubes (Project Euler 131 candidates)."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7  # (n + 1)**3 - n**3 for n = 1
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index  # next difference of consecutive cubes
    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
627
0
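# Illustrative sketch (not part of the original test file above): the tests assert that
# the Auto API resolves "bert-base-cased" to the BERT classes. A minimal standalone
# version of that round trip, assuming the checkpoint is reachable on the Hub and
# TensorFlow is installed:
from transformers import AutoConfig, BertConfig, TFAutoModel, TFBertModel

config = AutoConfig.from_pretrained("bert-base-cased")
assert isinstance(config, BertConfig)  # AutoConfig dispatches on the stored model_type

model = TFAutoModel.from_pretrained("bert-base-cased")
assert isinstance(model, TFBertModel)  # TFAutoModel dispatches on the config class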
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import subprocess from packaging.version import Version, parse from accelerate.commands.config.config_args import default_config_file, load_config_from_file _lowerCamelCase : List[Any] = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.''' def A__ ( __A : Optional[int]=None ) ->Optional[Any]: if subparsers is not None: __A =subparsers.add_parser('''tpu-config''' , description=_description ) else: __A =argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description ) # Core arguments __A =parser.add_argument_group( '''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' ) config_args.add_argument( '''--config_file''' , type=__A , default=__A , help='''Path to the config file to use for accelerate.''' , ) config_args.add_argument( '''--tpu_name''' , default=__A , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , ) config_args.add_argument( '''--tpu_zone''' , default=__A , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , ) __A =parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' ) pod_args.add_argument( '''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , ) pod_args.add_argument( '''--command_file''' , default=__A , help='''The path to the file containing the commands to run on the pod on startup.''' , ) pod_args.add_argument( '''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , ) pod_args.add_argument( '''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , ) pod_args.add_argument( '''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , ) pod_args.add_argument( '''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' ) if subparsers is not None: parser.set_defaults(func=__A ) return parser def A__ ( __A : Union[str, Any] ) ->Optional[Any]: __A =None # Get the default from the config file if it exists. 
if args.config_file is not None or os.path.isfile(__A ): __A =load_config_from_file(args.config_file ) if not args.command_file and defaults.command_file is not None and not args.command: __A =defaults.command_file if not args.command and defaults.commands is not None: __A =defaults.commands if not args.tpu_name: __A =defaults.tpu_name if not args.tpu_zone: __A =defaults.tpu_zone if args.accelerate_version == "dev": __A ='''git+https://github.com/huggingface/accelerate.git''' elif args.accelerate_version == "latest": __A ='''accelerate -U''' elif isinstance(parse(args.accelerate_version ) , __A ): __A =F'''accelerate=={args.accelerate_version}''' if not args.command_file and not args.command: raise ValueError('''You must specify either a command file or a command to run on the pod.''' ) if args.command_file: with open(args.command_file , '''r''' ) as f: __A =[f.read().splitlines()] # To turn list of lists into list of strings if isinstance(args.command[0] , __A ): __A =[line for cmd in args.command for line in cmd] # Default to the shared folder and install accelerate __A =['''cd /usr/share'''] if args.install_accelerate: new_cmd += [F'''pip install {args.accelerate_version}'''] new_cmd += args.command __A ='''; '''.join(__A ) # Then send it to gcloud # Eventually try to use google-api-core to do this instead of subprocess __A =['''gcloud'''] if args.use_alpha: cmd += ["alpha"] cmd += [ "compute", "tpus", "tpu-vm", "ssh", args.tpu_name, "--zone", args.tpu_zone, "--command", args.command, "--worker", "all", ] if args.debug: print(F'''Running {' '.join(__A )}''' ) return subprocess.run(__A ) print('''Successfully setup pod.''' ) def A__ ( ) ->str: __A =tpu_command_parser() __A =parser.parse_args() tpu_command_launcher(__A )
516
import warnings

from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor


logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
516
1
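# Illustrative sketch (not from the accelerate source above): tpu_command_launcher joins
# the startup commands with "; " and passes them to `gcloud ... ssh --command`. A toy
# reconstruction of that assembly step; the TPU name and zone are hypothetical
# placeholders:
commands = ["cd /usr/share", "pip install accelerate", "accelerate launch train.py"]
command_str = "; ".join(commands)  # one SSH session runs the commands sequentially

argv = ["gcloud", "compute", "tpus", "tpu-vm", "ssh", "my-tpu",
        "--zone", "us-central1-a", "--command", command_str, "--worker", "all"]
print(" ".join(argv))      # --debug behaviour: print the command instead of running it
# import subprocess; subprocess.run(argv)  # non-debug behaviour: actually execute it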
from typing import List, Optional

from tokenizers import ByteLevelBPETokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}


class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
84
"""simple docstring""" from collections.abc import Callable import numpy as np def lowercase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> np.array: __magic_name__ = int(np.ceil((x_end - xa) / step_size ) ) __magic_name__ = np.zeros((n + 1,) ) __magic_name__ = ya __magic_name__ = xa for k in range(__UpperCamelCase ): __magic_name__ = y[k] + step_size * ode_func(__UpperCamelCase , y[k] ) __magic_name__ = y[k] + ( (step_size / 2) * (ode_func(__UpperCamelCase , y[k] ) + ode_func(x + step_size , __UpperCamelCase )) ) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
490
0
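# Usage sketch for the euler_modified integrator above (my example, not from the
# source): integrate y' = y with y(0) = 1 over [0, 1]; the exact solution is e**x,
# so the last value should be close to e.
import numpy as np

ys = euler_modified(lambda x, y: y, y0=1.0, x0=0.0, step_size=0.01, x_end=1.0)
print(ys[-1], np.e)  # the second-order method matches e to a few decimal places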
import math


def prime_sieve(n: int) -> list:
    """Odd-only sieve of Eratosthenes; returns all primes below n."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(limit: int = 9999_6666_3333) -> int:
    """Sum the semidivisible numbers up to `limit` (Project Euler problem 234)."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum


if __name__ == "__main__":
    print(solution())
369
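# Quick sanity check for prime_sieve above (my example, not from the source): the
# sieve marks only composites reachable from odd bases and appends 2 by hand, which is
# enough because the collection loop reads odd indices only.
print(prime_sieve(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]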
import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def lowerCAmelCase_ ( __UpperCAmelCase: Optional[Any] , __UpperCAmelCase: List[str] , __UpperCAmelCase: Dict , __UpperCAmelCase: Tuple , __UpperCAmelCase: str ) -> Optional[int]: # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file UpperCamelCase__ : Any = TapasConfig.from_json_file(__UpperCAmelCase ) # set absolute/relative position embeddings parameter UpperCamelCase__ : Optional[Any] = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": UpperCamelCase__ : List[Any] = TapasForQuestionAnswering(config=__UpperCAmelCase ) elif task == "WTQ": # run_task_main.py hparams UpperCamelCase__ : Any = 4 UpperCamelCase__ : List[str] = True # hparam_utils.py hparams UpperCamelCase__ : int = 0.664694 UpperCamelCase__ : Union[str, Any] = 0.207951 UpperCamelCase__ : Any = 0.121194 UpperCamelCase__ : int = True UpperCamelCase__ : Any = True UpperCamelCase__ : str = False UpperCamelCase__ : int = 0.0352513 UpperCamelCase__ : Dict = TapasForQuestionAnswering(config=__UpperCAmelCase ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams UpperCamelCase__ : Any = 4 UpperCamelCase__ : List[str] = False # hparam_utils.py hparams UpperCamelCase__ : int = 36.4519 UpperCamelCase__ : int = 0.903421 UpperCamelCase__ : List[str] = 222.088 UpperCamelCase__ : Dict = True UpperCamelCase__ : List[str] = True UpperCamelCase__ : Any = True UpperCamelCase__ : int = 0.763141 UpperCamelCase__ : Union[str, Any] = TapasForQuestionAnswering(config=__UpperCAmelCase ) elif task == "TABFACT": UpperCamelCase__ : List[Any] = TapasForSequenceClassification(config=__UpperCAmelCase ) elif task == "MLM": UpperCamelCase__ : Optional[Any] = TapasForMaskedLM(config=__UpperCAmelCase ) elif task == "INTERMEDIATE_PRETRAINING": UpperCamelCase__ : str = TapasModel(config=__UpperCAmelCase ) else: raise ValueError(f"Task {task} not supported." ) print(f"Building PyTorch model from configuration: {config}" ) # Load weights from tf checkpoint load_tf_weights_in_tapas(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # Save pytorch-model (weights and configuration) print(f"Save PyTorch model to {pytorch_dump_path}" ) model.save_pretrained(__UpperCAmelCase ) # Save tokenizer files print(f"Save tokenizer files to {pytorch_dump_path}" ) UpperCamelCase__ : str = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + '''vocab.txt''' , model_max_length=512 ) tokenizer.save_pretrained(__UpperCAmelCase ) print('''Used relative position embeddings:''' , model.config.reset_position_index_per_cell ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.' ) parser.add_argument( '--reset_position_index_per_cell', default=False, action='store_true', help='Whether to use relative position embeddings or not. Defaults to True.', ) parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' 
) parser.add_argument( '--tapas_config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained TAPAS model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) UpperCAmelCase_ = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
369
1
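# Illustrative sketch of the per-task dispatch in the TAPAS converter above (my
# example, not from the source): the task string picks a head class, then task-specific
# hyperparameters are set on the config. The classes are the real ones imported there;
# using a default TapasConfig() here instead of the JSON config file is an assumption.
from transformers import TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification

TASK_TO_MODEL = {
    "SQA": TapasForQuestionAnswering,
    "WTQ": TapasForQuestionAnswering,
    "WIKISQL_SUPERVISED": TapasForQuestionAnswering,
    "TABFACT": TapasForSequenceClassification,
    "MLM": TapasForMaskedLM,
}

config = TapasConfig()  # the converter loads this from --tapas_config_file instead
model = TASK_TO_MODEL["SQA"](config=config)
print(type(model).__name__)  # TapasForQuestionAnswering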
import torch

from diffusers import DiffusionPipeline


class UnetSchedulerOneForwardPipeline(DiffusionPipeline):
    """Minimal pipeline that runs a single UNet forward pass and one scheduler step."""

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        # Random sample shaped like the UNet's expected input
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1
        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample
        # scheduler_output - scheduler_output cancels to zero, so the result is all ones
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
89
import argparse import re from pathlib import Path import requests import torch from PIL import Image from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor from transformers import ( EfficientFormerConfig, EfficientFormerForImageClassificationWithTeacher, EfficientFormerImageProcessor, ) from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def a_ ( _A , _A ) -> List[Any]: """simple docstring""" snake_case__ = old_name if "patch_embed" in old_name: snake_case__ , snake_case__ , snake_case__ = old_name.split('.' ) if layer == "0": snake_case__ = old_name.replace('0' , 'convolution1' ) elif layer == "1": snake_case__ = old_name.replace('1' , 'batchnorm_before' ) elif layer == "3": snake_case__ = old_name.replace('3' , 'convolution2' ) else: snake_case__ = old_name.replace('4' , 'batchnorm_after' ) if "network" in old_name and re.search(R'\d\.\d' , _A ): snake_case__ = R'\b\d{2}\b' if bool(re.search(_A , _A ) ): snake_case__ = re.search(R'\d\.\d\d.' , _A ).group() else: snake_case__ = re.search(R'\d\.\d.' , _A ).group() if int(match[0] ) < 6: snake_case__ = old_name.replace(_A , '' ) snake_case__ = trimmed_name.replace('network' , match[0] + '.meta4D_layers.blocks.' + match[2:-1] ) snake_case__ = 'intermediate_stages.' + trimmed_name else: snake_case__ = old_name.replace(_A , '' ) if int(match[2] ) < num_meta4D_last_stage: snake_case__ = trimmed_name.replace('network' , 'meta4D_layers.blocks.' + match[2] ) else: snake_case__ = str(int(match[2] ) - num_meta4D_last_stage ) snake_case__ = trimmed_name.replace('network' , 'meta3D_layers.blocks.' + layer_index ) if "norm1" in old_name: snake_case__ = trimmed_name.replace('norm1' , 'layernorm1' ) elif "norm2" in old_name: snake_case__ = trimmed_name.replace('norm2' , 'layernorm2' ) elif "fc1" in old_name: snake_case__ = trimmed_name.replace('fc1' , 'linear_in' ) elif "fc2" in old_name: snake_case__ = trimmed_name.replace('fc2' , 'linear_out' ) snake_case__ = 'last_stage.' + trimmed_name elif "network" in old_name and re.search(R'.\d.' , _A ): snake_case__ = old_name.replace('network' , 'intermediate_stages' ) if "fc" in new_name: snake_case__ = new_name.replace('fc' , 'convolution' ) elif ("norm1" in new_name) and ("layernorm1" not in new_name): snake_case__ = new_name.replace('norm1' , 'batchnorm_before' ) elif ("norm2" in new_name) and ("layernorm2" not in new_name): snake_case__ = new_name.replace('norm2' , 'batchnorm_after' ) if "proj" in new_name: snake_case__ = new_name.replace('proj' , 'projection' ) if "dist_head" in new_name: snake_case__ = new_name.replace('dist_head' , 'distillation_classifier' ) elif "head" in new_name: snake_case__ = new_name.replace('head' , 'classifier' ) elif "patch_embed" in new_name: snake_case__ = 'efficientformer.' + new_name elif new_name == "norm.weight" or new_name == "norm.bias": snake_case__ = new_name.replace('norm' , 'layernorm' ) snake_case__ = 'efficientformer.' + new_name else: snake_case__ = 'efficientformer.encoder.' 
+ new_name return new_name def a_ ( _A , _A ) -> Optional[Any]: """simple docstring""" for key in checkpoint.copy().keys(): snake_case__ = checkpoint.pop(_A ) snake_case__ = val return checkpoint def a_ ( ) -> Union[str, Any]: """simple docstring""" snake_case__ = 'http://images.cocodataset.org/val2017/000000039769.jpg' snake_case__ = Image.open(requests.get(_A , stream=_A ).raw ) return image def a_ ( _A , _A , _A , _A ) -> Optional[Any]: """simple docstring""" snake_case__ = torch.load(_A , map_location='cpu' )['model'] snake_case__ = EfficientFormerConfig.from_json_file(_A ) snake_case__ = EfficientFormerForImageClassificationWithTeacher(_A ) snake_case__ = '_'.join(checkpoint_path.split('/' )[-1].split('.' )[0].split('_' )[:-1] ) snake_case__ = config.depths[-1] - config.num_metaad_blocks + 1 snake_case__ = convert_torch_checkpoint(_A , _A ) model.load_state_dict(_A ) model.eval() snake_case__ = { 'bilinear': PILImageResampling.BILINEAR, 'bicubic': PILImageResampling.BICUBIC, 'nearest': PILImageResampling.NEAREST, } # prepare image snake_case__ = prepare_img() snake_case__ = 256 snake_case__ = 224 snake_case__ = EfficientFormerImageProcessor( size={'shortest_edge': image_size} , crop_size={'height': crop_size, 'width': crop_size} , resample=pillow_resamplings['bicubic'] , ) snake_case__ = processor(images=_A , return_tensors='pt' ).pixel_values # original processing pipeline snake_case__ = Compose( [ Resize(_A , interpolation=pillow_resamplings['bicubic'] ), CenterCrop(_A ), ToTensor(), Normalize(_A , _A ), ] ) snake_case__ = image_transforms(_A ).unsqueeze(0 ) assert torch.allclose(_A , _A ) snake_case__ = model(_A ) snake_case__ = outputs.logits snake_case__ = (1, 1000) if "l1" in model_name: snake_case__ = torch.Tensor( [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] ) assert torch.allclose(logits[0, :10] , _A , atol=1e-3 ) assert logits.shape == expected_shape elif "l3" in model_name: snake_case__ = torch.Tensor( [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] ) assert torch.allclose(logits[0, :10] , _A , atol=1e-3 ) assert logits.shape == expected_shape elif "l7" in model_name: snake_case__ = torch.Tensor( [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] ) assert logits.shape == expected_shape else: raise ValueError( f'''Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7''' ) # Save Checkpoints Path(_A ).mkdir(exist_ok=_A ) model.save_pretrained(_A ) print(f'''Checkpoint successfuly converted. Model saved at {pytorch_dump_path}''' ) processor.save_pretrained(_A ) print(f'''Processor successfuly saved at {pytorch_dump_path}''' ) if push_to_hub: print('Pushing model to the hub...' 
) model.push_to_hub( repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='Add model' , use_temp_dir=_A , ) processor.push_to_hub( repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='Add image processor' , use_temp_dir=_A , ) if __name__ == "__main__": __UpperCamelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--pytorch_model_path""", default=None, type=str, required=True, help="""Path to EfficientFormer pytorch checkpoint.""", ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The json file for EfficientFormer model config.""", ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") parser.add_argument( """--no-push_to_hub""", dest="""push_to_hub""", action="""store_false""", help="""Do not push model and image processor to the hub""", ) parser.set_defaults(push_to_hub=True) __UpperCamelCase : Dict = parser.parse_args() convert_efficientformer_checkpoint( checkpoint_path=args.pytorch_model_path, efficientformer_config_file=args.config_file, pytorch_dump_path=args.pytorch_dump_path, push_to_hub=args.push_to_hub, )
328
0
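# Hypothetical usage of the one-forward-pass pipeline above (my example, not from the
# source): wire it to a small randomly initialised UNet and a DDPM scheduler. The
# output is all ones by construction, which is what makes it a cheap smoke test.
from diffusers import DDPMScheduler, UNet2DModel

unet = UNet2DModel(sample_size=8, in_channels=3, out_channels=3)
scheduler = DDPMScheduler()
pipe = UnetSchedulerOneForwardPipeline(unet=unet, scheduler=scheduler)
print(pipe().shape)  # torch.Size([1, 3, 8, 8])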
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase_ : Tuple = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Optional[int] ) -> Dict: """simple docstring""" UpperCamelCase :List[str] = DPTConfig(embedding_type="""hybrid""" ) if "large" in checkpoint_url: UpperCamelCase :Any = 1024 UpperCamelCase :List[Any] = 4096 UpperCamelCase :str = 24 UpperCamelCase :Optional[Any] = 16 UpperCamelCase :Union[str, Any] = [5, 11, 17, 23] UpperCamelCase :Tuple = [256, 512, 1024, 1024] UpperCamelCase :Union[str, Any] = (1, 384, 384) if "nyu" or "midas" in checkpoint_url: UpperCamelCase :Dict = 768 UpperCamelCase :Dict = [1, 1, 1, 0.5] UpperCamelCase :Optional[int] = [256, 512, 768, 768] UpperCamelCase :str = 150 UpperCamelCase :Tuple = 16 UpperCamelCase :Dict = (1, 384, 384) UpperCamelCase :Union[str, Any] = False UpperCamelCase :Tuple = """project""" if "ade" in checkpoint_url: UpperCamelCase :int = True UpperCamelCase :List[str] = 768 UpperCamelCase :Tuple = [1, 1, 1, 0.5] UpperCamelCase :Union[str, Any] = 150 UpperCamelCase :Optional[Any] = 16 UpperCamelCase :Optional[Any] = """huggingface/label-files""" UpperCamelCase :int = """ade20k-id2label.json""" UpperCamelCase :int = json.load(open(cached_download(hf_hub_url(__magic_name__ , __magic_name__ , repo_type="""dataset""" ) ) , """r""" ) ) UpperCamelCase :int = {int(__magic_name__ ): v for k, v in idalabel.items()} UpperCamelCase :Any = idalabel UpperCamelCase :Optional[Any] = {v: k for k, v in idalabel.items()} UpperCamelCase :List[Any] = [1, 150, 480, 480] return config, expected_shape def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Optional[int] ) -> List[Any]: """simple docstring""" UpperCamelCase :List[str] = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""] for k in ignore_keys: state_dict.pop(__magic_name__ , __magic_name__ ) def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Any ) -> List[str]: """simple docstring""" if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): UpperCamelCase :Any = name.replace("""pretrained.model""" , """dpt.encoder""" ) if "pretrained.model" in name: UpperCamelCase :Optional[Any] = name.replace("""pretrained.model""" , """dpt.embeddings""" ) if "patch_embed" in name: UpperCamelCase :List[Any] = name.replace("""patch_embed""" , """""" ) if "pos_embed" in name: UpperCamelCase :Dict = name.replace("""pos_embed""" , """position_embeddings""" ) if "attn.proj" in name: UpperCamelCase :Optional[int] = name.replace("""attn.proj""" , """attention.output.dense""" ) if "proj" in name and "project" not in name: UpperCamelCase :Union[str, Any] = name.replace("""proj""" , """projection""" ) if "blocks" in name: UpperCamelCase :Optional[int] = name.replace("""blocks""" , """layer""" ) if "mlp.fc1" in name: UpperCamelCase :Union[str, Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: UpperCamelCase :str = name.replace("""mlp.fc2""" , """output.dense""" ) if "norm1" in name and "backbone" not in name: UpperCamelCase :Optional[Any] = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name and "backbone" not in name: UpperCamelCase :Dict = name.replace("""norm2""" , 
"""layernorm_after""" ) if "scratch.output_conv" in name: UpperCamelCase :List[str] = name.replace("""scratch.output_conv""" , """head""" ) if "scratch" in name: UpperCamelCase :Union[str, Any] = name.replace("""scratch""" , """neck""" ) if "layer1_rn" in name: UpperCamelCase :Optional[int] = name.replace("""layer1_rn""" , """convs.0""" ) if "layer2_rn" in name: UpperCamelCase :Tuple = name.replace("""layer2_rn""" , """convs.1""" ) if "layer3_rn" in name: UpperCamelCase :List[Any] = name.replace("""layer3_rn""" , """convs.2""" ) if "layer4_rn" in name: UpperCamelCase :Optional[Any] = name.replace("""layer4_rn""" , """convs.3""" ) if "refinenet" in name: UpperCamelCase :Optional[Any] = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 UpperCamelCase :Any = name.replace(f"""refinenet{layer_idx}""" , f"""fusion_stage.layers.{abs(layer_idx-4 )}""" ) if "out_conv" in name: UpperCamelCase :Tuple = name.replace("""out_conv""" , """projection""" ) if "resConfUnit1" in name: UpperCamelCase :Any = name.replace("""resConfUnit1""" , """residual_layer1""" ) if "resConfUnit2" in name: UpperCamelCase :Tuple = name.replace("""resConfUnit2""" , """residual_layer2""" ) if "conv1" in name: UpperCamelCase :Union[str, Any] = name.replace("""conv1""" , """convolution1""" ) if "conv2" in name: UpperCamelCase :int = name.replace("""conv2""" , """convolution2""" ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: UpperCamelCase :Optional[Any] = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" ) if "pretrained.act_postprocess2.0.project.0" in name: UpperCamelCase :Tuple = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" ) if "pretrained.act_postprocess3.0.project.0" in name: UpperCamelCase :Tuple = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" ) if "pretrained.act_postprocess4.0.project.0" in name: UpperCamelCase :Union[str, Any] = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" ) # resize blocks if "pretrained.act_postprocess1.3" in name: UpperCamelCase :Tuple = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" ) if "pretrained.act_postprocess1.4" in name: UpperCamelCase :List[Any] = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" ) if "pretrained.act_postprocess2.3" in name: UpperCamelCase :Any = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" ) if "pretrained.act_postprocess2.4" in name: UpperCamelCase :Union[str, Any] = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" ) if "pretrained.act_postprocess3.3" in name: UpperCamelCase :Any = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" ) if "pretrained.act_postprocess4.3" in name: UpperCamelCase :int = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" ) if "pretrained.act_postprocess4.4" in name: UpperCamelCase :Optional[Any] = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" ) if "pretrained" in name: UpperCamelCase :Union[str, Any] = name.replace("""pretrained""" , """dpt""" ) if 
"bn" in name: UpperCamelCase :Any = name.replace("""bn""" , """batch_norm""" ) if "head" in name: UpperCamelCase :List[Any] = name.replace("""head""" , """head.head""" ) if "encoder.norm" in name: UpperCamelCase :Dict = name.replace("""encoder.norm""" , """layernorm""" ) if "auxlayer" in name: UpperCamelCase :Optional[int] = name.replace("""auxlayer""" , """auxiliary_head.head""" ) if "backbone" in name: UpperCamelCase :Optional[int] = name.replace("""backbone""" , """backbone.bit.encoder""" ) if ".." in name: UpperCamelCase :Optional[Any] = name.replace("""..""" , """.""" ) if "stem.conv" in name: UpperCamelCase :List[Any] = name.replace("""stem.conv""" , """bit.embedder.convolution""" ) if "blocks" in name: UpperCamelCase :Union[str, Any] = name.replace("""blocks""" , """layers""" ) if "convolution" in name and "backbone" in name: UpperCamelCase :List[str] = name.replace("""convolution""" , """conv""" ) if "layer" in name and "backbone" in name: UpperCamelCase :Tuple = name.replace("""layer""" , """layers""" ) if "backbone.bit.encoder.bit" in name: UpperCamelCase :Optional[int] = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""" ) if "embedder.conv" in name: UpperCamelCase :Optional[Any] = name.replace("""embedder.conv""" , """embedder.convolution""" ) if "backbone.bit.encoder.stem.norm" in name: UpperCamelCase :Union[str, Any] = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""" ) return name def SCREAMING_SNAKE_CASE_ ( __magic_name__ : List[Any] , __magic_name__ : Optional[int] ) -> Dict: """simple docstring""" for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCamelCase :Union[str, Any] = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.weight""" ) UpperCamelCase :Optional[int] = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCamelCase :Tuple = in_proj_weight[: config.hidden_size, :] UpperCamelCase :Any = in_proj_bias[: config.hidden_size] UpperCamelCase :int = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCamelCase :Union[str, Any] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCamelCase :Optional[int] = in_proj_weight[ -config.hidden_size :, : ] UpperCamelCase :Dict = in_proj_bias[-config.hidden_size :] def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]: """simple docstring""" UpperCamelCase :int = """http://images.cocodataset.org/val2017/000000039769.jpg""" UpperCamelCase :Optional[Any] = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw ) return im @torch.no_grad() def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Dict , __magic_name__ : Tuple , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : int ) -> List[str]: """simple docstring""" UpperCamelCase :List[Any] = get_dpt_config(__magic_name__ ) # load original state_dict from URL # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu") UpperCamelCase :int = torch.load(__magic_name__ , map_location="""cpu""" ) # remove certain keys remove_ignore_keys_(__magic_name__ ) # rename keys for key in state_dict.copy().keys(): UpperCamelCase :Optional[int] = state_dict.pop(__magic_name__ ) UpperCamelCase :List[Any] = val # read in qkv matrices read_in_q_k_v(__magic_name__ , __magic_name__ ) # load HuggingFace model UpperCamelCase :Optional[Any] = DPTForSemanticSegmentation(__magic_name__ ) if """ade""" in 
checkpoint_url else DPTForDepthEstimation(__magic_name__ ) model.load_state_dict(__magic_name__ ) model.eval() # Check outputs on an image UpperCamelCase :Dict = 480 if """ade""" in checkpoint_url else 384 UpperCamelCase :str = DPTImageProcessor(size=__magic_name__ ) UpperCamelCase :List[str] = prepare_img() UpperCamelCase :Tuple = image_processor(__magic_name__ , return_tensors="""pt""" ) # forward pass UpperCamelCase :Optional[int] = model(**__magic_name__ ).logits if """ade""" in checkpoint_url else model(**__magic_name__ ).predicted_depth if show_prediction: UpperCamelCase :Union[str, Any] = ( torch.nn.functional.interpolate( outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=__magic_name__ , ) .squeeze() .cpu() .numpy() ) Image.fromarray((prediction / prediction.max()) * 255 ).show() if pytorch_dump_folder_path is not None: Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) print(f"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(__magic_name__ ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(__magic_name__ ) if push_to_hub: model.push_to_hub("""ybelkada/dpt-hybrid-midas""" ) image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" ) if __name__ == "__main__": UpperCAmelCase_ : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''', type=str, help='''URL of the original DPT checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=False, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', ) parser.add_argument( '''--model_name''', default='''dpt-large''', type=str, help='''Name of the model, in case you\'re pushing to the hub.''', ) parser.add_argument( '''--show_prediction''', action='''store_true''', ) UpperCAmelCase_ : Union[str, Any] = parser.parse_args() convert_dpt_checkpoint( args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction )
718
def solution() -> int:
    """Count the Sundays that fell on the first of the month from 1 Jan 1901 to 31 Dec 2000 (Project Euler 19)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6  # the first Sunday of 1901 was 6 January
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
590
0
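# Cross-check of solution() above with the standard library (my example, not from the
# source): datetime already knows the calendar, so the Project Euler 19 count can be
# verified directly; weekday() == 6 means Sunday.
from datetime import date

direct = sum(
    date(year, month, 1).weekday() == 6
    for year in range(1901, 2001)
    for month in range(1, 13)
)
print(direct)  # 171, matching solution()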
'''simple docstring''' import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class __A ( UpperCamelCase__ ): a__ : Optional[int] = ["""image_processor""", """tokenizer"""] a__ : str = """OwlViTImageProcessor""" a__ : int = ("""CLIPTokenizer""", """CLIPTokenizerFast""") def __init__(self : Optional[int] , __a : List[Any]=None , __a : str=None , **__a : List[Any] ): UpperCAmelCase_ = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , __a , ) UpperCAmelCase_ = kwargs.pop("feature_extractor" ) UpperCAmelCase_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(__a , __a ) def __call__(self : int , __a : List[str]=None , __a : Union[str, Any]=None , __a : int=None , __a : Dict="max_length" , __a : List[Any]="np" , **__a : Tuple ): if text is None and query_images is None and images is None: raise ValueError( "You have to specify at least one text or query image or image. All three cannot be none." ) if text is not None: if isinstance(__a , __a ) or (isinstance(__a , __a ) and not isinstance(text[0] , __a )): UpperCAmelCase_ = [self.tokenizer(__a , padding=__a , return_tensors=__a , **__a )] elif isinstance(__a , __a ) and isinstance(text[0] , __a ): UpperCAmelCase_ = [] # Maximum number of queries across batch UpperCAmelCase_ = max([len(__a ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(__a ) != max_num_queries: UpperCAmelCase_ = t + [" "] * (max_num_queries - len(__a )) UpperCAmelCase_ = self.tokenizer(__a , padding=__a , return_tensors=__a , **__a ) encodings.append(__a ) else: raise TypeError("Input text should be a string, a list of strings or a nested list of strings" ) if return_tensors == "np": UpperCAmelCase_ = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) UpperCAmelCase_ = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp UpperCAmelCase_ = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) UpperCAmelCase_ = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch UpperCAmelCase_ = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 ) UpperCAmelCase_ = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf UpperCAmelCase_ = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 ) UpperCAmelCase_ = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 ) else: raise ValueError("Target return tensor type could not be returned" ) UpperCAmelCase_ = BatchEncoding() UpperCAmelCase_ = input_ids UpperCAmelCase_ = attention_mask if query_images is not None: UpperCAmelCase_ = BatchEncoding() UpperCAmelCase_ = self.image_processor( __a , return_tensors=__a , **__a ).pixel_values UpperCAmelCase_ = query_pixel_values if images is not None: UpperCAmelCase_ = 
self.image_processor(__a , return_tensors=__a , **__a ) if text is not None and images is not None: UpperCAmelCase_ = image_features.pixel_values return encoding elif query_images is not None and images is not None: UpperCAmelCase_ = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**__a ) , tensor_type=__a ) def _lowercase (self : List[str] , *__a : Optional[Any] , **__a : Dict ): return self.image_processor.post_process(*__a , **__a ) def _lowercase (self : List[Any] , *__a : Optional[int] , **__a : Optional[int] ): return self.image_processor.post_process_object_detection(*__a , **__a ) def _lowercase (self : Optional[int] , *__a : int , **__a : Union[str, Any] ): return self.image_processor.post_process_image_guided_detection(*__a , **__a ) def _lowercase (self : List[str] , *__a : List[str] , **__a : List[str] ): return self.tokenizer.batch_decode(*__a , **__a ) def _lowercase (self : int , *__a : List[Any] , **__a : Tuple ): return self.tokenizer.decode(*__a , **__a ) @property def _lowercase (self : Optional[Any] ): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __a , ) return self.image_processor_class @property def _lowercase (self : Union[str, Any] ): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __a , ) return self.image_processor
78
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_electra import ElectraTokenizer UpperCamelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} UpperCamelCase = { """vocab_file""": { """google/electra-small-generator""": ( """https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt""" ), """google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""", """google/electra-large-generator""": ( """https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt""" ), """google/electra-small-discriminator""": ( """https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt""" ), """google/electra-base-discriminator""": ( """https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt""" ), """google/electra-large-discriminator""": ( """https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """google/electra-small-generator""": ( """https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json""" ), """google/electra-base-generator""": ( """https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json""" ), """google/electra-large-generator""": ( """https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json""" ), """google/electra-small-discriminator""": ( """https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json""" ), """google/electra-base-discriminator""": ( """https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json""" ), """google/electra-large-discriminator""": ( """https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json""" ), }, } UpperCamelCase = { """google/electra-small-generator""": 512, """google/electra-base-generator""": 512, """google/electra-large-generator""": 512, """google/electra-small-discriminator""": 512, """google/electra-base-discriminator""": 512, """google/electra-large-discriminator""": 512, } UpperCamelCase = { """google/electra-small-generator""": {"""do_lower_case""": True}, """google/electra-base-generator""": {"""do_lower_case""": True}, """google/electra-large-generator""": {"""do_lower_case""": True}, """google/electra-small-discriminator""": {"""do_lower_case""": True}, """google/electra-base-discriminator""": {"""do_lower_case""": True}, """google/electra-large-discriminator""": {"""do_lower_case""": True}, } class UpperCamelCase__ ( _lowerCAmelCase ): """simple docstring""" A__ : Dict = VOCAB_FILES_NAMES A__ : str = PRETRAINED_VOCAB_FILES_MAP A__ : List[Any] = PRETRAINED_INIT_CONFIGURATION A__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ : List[str] = ElectraTokenizer def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__="[UNK]" , SCREAMING_SNAKE_CASE__="[SEP]" , SCREAMING_SNAKE_CASE__="[PAD]" , SCREAMING_SNAKE_CASE__="[CLS]" , SCREAMING_SNAKE_CASE__="[MASK]" , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ , ) -> str: super().__init__( SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , 
pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) A__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , SCREAMING_SNAKE_CASE__ ) != do_lower_case or normalizer_state.get("strip_accents" , SCREAMING_SNAKE_CASE__ ) != strip_accents or normalizer_state.get("handle_chinese_chars" , SCREAMING_SNAKE_CASE__ ) != tokenize_chinese_chars ): A__ = getattr(SCREAMING_SNAKE_CASE__ , normalizer_state.pop("type" ) ) A__ = do_lower_case A__ = strip_accents A__ = tokenize_chinese_chars A__ = normalizer_class(**SCREAMING_SNAKE_CASE__ ) A__ = do_lower_case def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ) -> List[str]: A__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> List[int]: A__ = [self.sep_token_id] A__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> Tuple[str]: A__ = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ ) return tuple(SCREAMING_SNAKE_CASE__ )
104
0
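# Hand-worked sketch of create_token_type_ids_from_sequences above (my example, not
# from the source): for a sentence pair the tokenizer emits 0s over [CLS] A [SEP] and
# 1s over B [SEP]. The ids below are made up; only the lengths matter.
cls, sep = [101], [102]      # hypothetical [CLS]/[SEP] ids
token_ids_0 = [7592, 2088]   # hypothetical ids for sentence A
token_ids_1 = [2129]         # hypothetical ids for sentence B
segment_ids = len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
print(segment_ids)           # [0, 0, 0, 0, 1, 1]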
"""simple docstring""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def a__ ( lowerCAmelCase__ ): UpperCAmelCase_ = 384 UpperCAmelCase_ = 7 if "tiny" in model_name: UpperCAmelCase_ = 96 UpperCAmelCase_ = (2, 2, 6, 2) UpperCAmelCase_ = (3, 6, 12, 24) elif "small" in model_name: UpperCAmelCase_ = 96 UpperCAmelCase_ = (2, 2, 18, 2) UpperCAmelCase_ = (3, 6, 12, 24) elif "base" in model_name: UpperCAmelCase_ = 128 UpperCAmelCase_ = (2, 2, 18, 2) UpperCAmelCase_ = (4, 8, 16, 32) UpperCAmelCase_ = 12 UpperCAmelCase_ = 512 elif "large" in model_name: UpperCAmelCase_ = 192 UpperCAmelCase_ = (2, 2, 18, 2) UpperCAmelCase_ = (6, 12, 24, 48) UpperCAmelCase_ = 12 UpperCAmelCase_ = 768 # set label information UpperCAmelCase_ = 150 UpperCAmelCase_ = "huggingface/label-files" UpperCAmelCase_ = "ade20k-id2label.json" UpperCAmelCase_ = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type="dataset" ) , "r" ) ) UpperCAmelCase_ = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()} UpperCAmelCase_ = {v: k for k, v in idalabel.items()} UpperCAmelCase_ = SwinConfig( embed_dim=lowerCAmelCase__ , depths=lowerCAmelCase__ , num_heads=lowerCAmelCase__ , window_size=lowerCAmelCase__ , out_features=["stage1", "stage2", "stage3", "stage4"] , ) UpperCAmelCase_ = UperNetConfig( backbone_config=lowerCAmelCase__ , auxiliary_in_channels=lowerCAmelCase__ , num_labels=lowerCAmelCase__ , idalabel=lowerCAmelCase__ , labelaid=lowerCAmelCase__ , ) return config def a__ ( lowerCAmelCase__ ): UpperCAmelCase_ = [] # fmt: off # stem rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight") ) rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias") ) rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight") ) rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") ) rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") ) rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") ) rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") ) rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") ) rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") ) rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") ) rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") ) 
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") ) rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") ) rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") ) rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") ) if i < 3: rename_keys.append((f"""backbone.stages.{i}.downsample.reduction.weight""", f"""backbone.encoder.layers.{i}.downsample.reduction.weight""") ) rename_keys.append((f"""backbone.stages.{i}.downsample.norm.weight""", f"""backbone.encoder.layers.{i}.downsample.norm.weight""") ) rename_keys.append((f"""backbone.stages.{i}.downsample.norm.bias""", f"""backbone.encoder.layers.{i}.downsample.norm.bias""") ) rename_keys.append((f"""backbone.norm{i}.weight""", f"""backbone.hidden_states_norms.stage{i+1}.weight""") ) rename_keys.append((f"""backbone.norm{i}.bias""", f"""backbone.hidden_states_norms.stage{i+1}.bias""") ) # decode head rename_keys.extend( [ ("decode_head.conv_seg.weight", "decode_head.classifier.weight"), ("decode_head.conv_seg.bias", "decode_head.classifier.bias"), ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"), ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"), ] ) # fmt: on return rename_keys def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): UpperCAmelCase_ = dct.pop(lowerCAmelCase__ ) UpperCAmelCase_ = val def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ): UpperCAmelCase_ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): UpperCAmelCase_ = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) UpperCAmelCase_ = state_dict.pop(f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight""" ) UpperCAmelCase_ = state_dict.pop(f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase_ = in_proj_weight[:dim, :] UpperCAmelCase_ = in_proj_bias[: dim] UpperCAmelCase_ = in_proj_weight[ dim : dim * 2, : ] UpperCAmelCase_ = in_proj_bias[ dim : dim * 2 ] UpperCAmelCase_ = in_proj_weight[ -dim :, : ] UpperCAmelCase_ = in_proj_bias[-dim :] # fmt: on def a__ ( lowerCAmelCase__ ): UpperCAmelCase_ , UpperCAmelCase_ = x.shape UpperCAmelCase_ = x.reshape(lowerCAmelCase__ , 4 , in_channel // 4 ) UpperCAmelCase_ = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(lowerCAmelCase__ , lowerCAmelCase__ ) return x def a__ ( lowerCAmelCase__ ): UpperCAmelCase_ , UpperCAmelCase_ = x.shape UpperCAmelCase_ = x.reshape(lowerCAmelCase__ , in_channel // 4 , 4 ) UpperCAmelCase_ = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(lowerCAmelCase__ , lowerCAmelCase__ ) return x def a__ ( lowerCAmelCase__ ): UpperCAmelCase_ = x.shape[0] UpperCAmelCase_ = x.reshape(4 , in_channel // 4 ) UpperCAmelCase_ = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(lowerCAmelCase__ ) return x def a__ ( lowerCAmelCase__ ): UpperCAmelCase_ = x.shape[0] UpperCAmelCase_ = x.reshape(in_channel // 4 , 4 ) UpperCAmelCase_ = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(lowerCAmelCase__ ) return x def a__ ( 
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): UpperCAmelCase_ = { "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth", "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth", "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth", "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth", } UpperCAmelCase_ = model_name_to_url[model_name] UpperCAmelCase_ = torch.hub.load_state_dict_from_url(lowerCAmelCase__ , map_location="cpu" , file_name=lowerCAmelCase__ )[ "state_dict" ] for name, param in state_dict.items(): print(lowerCAmelCase__ , param.shape ) UpperCAmelCase_ = get_upernet_config(lowerCAmelCase__ ) UpperCAmelCase_ = UperNetForSemanticSegmentation(lowerCAmelCase__ ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): UpperCAmelCase_ = state_dict.pop(lowerCAmelCase__ ) if "bn" in key: UpperCAmelCase_ = key.replace("bn" , "batch_norm" ) UpperCAmelCase_ = val # rename keys UpperCAmelCase_ = create_rename_keys(lowerCAmelCase__ ) for src, dest in rename_keys: rename_key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) read_in_q_k_v(lowerCAmelCase__ , config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: UpperCAmelCase_ = reverse_correct_unfold_reduction_order(lowerCAmelCase__ ) if "norm" in key: UpperCAmelCase_ = reverse_correct_unfold_norm_order(lowerCAmelCase__ ) model.load_state_dict(lowerCAmelCase__ ) # verify on image UpperCAmelCase_ = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg" UpperCAmelCase_ = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw ).convert("RGB" ) UpperCAmelCase_ = SegformerImageProcessor() UpperCAmelCase_ = processor(lowerCAmelCase__ , return_tensors="pt" ).pixel_values with torch.no_grad(): UpperCAmelCase_ = model(lowerCAmelCase__ ) UpperCAmelCase_ = outputs.logits print(logits.shape ) print("First values of logits:" , logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": UpperCAmelCase_ = torch.tensor( [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ) elif model_name == "upernet-swin-small": UpperCAmelCase_ = torch.tensor( [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] ) elif model_name == "upernet-swin-base": UpperCAmelCase_ = torch.tensor( [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] ) elif model_name == "upernet-swin-large": UpperCAmelCase_ = torch.tensor( [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] ) print("Logits:" , outputs.logits[0, 0, :3, :3] ) assert 
torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCAmelCase__ , atol=1e-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(lowerCAmelCase__ ) print(f"""Saving processor to {pytorch_dump_folder_path}""" ) processor.save_pretrained(lowerCAmelCase__ ) if push_to_hub: print(f"""Pushing model and processor for {model_name} to hub""" ) model.push_to_hub(f"""openmmlab/{model_name}""" ) processor.push_to_hub(f"""openmmlab/{model_name}""" ) if __name__ == "__main__": lowerCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""upernet-swin-tiny""", type=str, choices=[F"upernet-swin-{size}" for size in ["""tiny""", """small""", """base""", """large"""]], help="""Name of the Swin + UperNet model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) lowerCamelCase = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
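# Example invocation of the UperNet conversion script above. The filename is a
# placeholder (use whatever this file is saved as); the flags are the ones
# defined by the argparse block:
#
#   python convert_upernet_to_pytorch.py \
#       --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny \
#       --push_to_hub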
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() lowerCamelCase = logging.get_logger(__name__) lowerCamelCase = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""", """self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""", """self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""", """self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""", """self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""", """self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""", """self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""", """self_attn.rotary_emb""": """encoder.embed_positions""", """self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""", """conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""", """conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""", """conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""", """conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""", """conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""", """ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""", """ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""", """ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""", """ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""", """ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""", """ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } lowerCamelCase = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): for attribute in key.split("." ): UpperCAmelCase_ = getattr(lowerCAmelCase__ , lowerCAmelCase__ ) if weight_type is not None: UpperCAmelCase_ = getattr(lowerCAmelCase__ , lowerCAmelCase__ ).shape else: UpperCAmelCase_ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": UpperCAmelCase_ = value elif weight_type == "weight_g": UpperCAmelCase_ = value elif weight_type == "weight_v": UpperCAmelCase_ = value elif weight_type == "bias": UpperCAmelCase_ = value elif weight_type == "running_mean": UpperCAmelCase_ = value elif weight_type == "running_var": UpperCAmelCase_ = value elif weight_type == "num_batches_tracked": UpperCAmelCase_ = value elif weight_type == "inv_freq": UpperCAmelCase_ = value else: UpperCAmelCase_ = value logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): UpperCAmelCase_ = [] UpperCAmelCase_ = fairseq_model.state_dict() UpperCAmelCase_ = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): UpperCAmelCase_ = False if "conv_layers" in name: load_conv_layer( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == "group" , ) UpperCAmelCase_ = True else: for key, mapped_key in MAPPING.items(): UpperCAmelCase_ = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: UpperCAmelCase_ = True if "*" in mapped_key: UpperCAmelCase_ = name.split(lowerCAmelCase__ )[0].split("." )[-2] UpperCAmelCase_ = mapped_key.replace("*" , lowerCAmelCase__ ) if "pos_bias_u" in name: UpperCAmelCase_ = None elif "pos_bias_v" in name: UpperCAmelCase_ = None elif "weight_g" in name: UpperCAmelCase_ = "weight_g" elif "weight_v" in name: UpperCAmelCase_ = "weight_v" elif "bias" in name: UpperCAmelCase_ = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj UpperCAmelCase_ = "weight" elif "running_mean" in name: UpperCAmelCase_ = "running_mean" elif "inv_freq" in name: UpperCAmelCase_ = "inv_freq" elif "running_var" in name: UpperCAmelCase_ = "running_var" elif "num_batches_tracked" in name: UpperCAmelCase_ = "num_batches_tracked" else: UpperCAmelCase_ = None set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) continue if not is_used: unused_weights.append(lowerCAmelCase__ ) logger.warning(f"""Unused weights: {unused_weights}""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): UpperCAmelCase_ = full_name.split("conv_layers." )[-1] UpperCAmelCase_ = name.split("." 
) UpperCAmelCase_ = int(items[0] ) UpperCAmelCase_ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) UpperCAmelCase_ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) UpperCAmelCase_ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) UpperCAmelCase_ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) UpperCAmelCase_ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(lowerCAmelCase__ ) @torch.no_grad() def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True ): if config_path is not None: UpperCAmelCase_ = WavaVecaConformerConfig.from_pretrained(lowerCAmelCase__ , hidden_act="swish" ) else: UpperCAmelCase_ = WavaVecaConformerConfig() if "rope" in checkpoint_path: UpperCAmelCase_ = "rotary" if is_finetuned: if dict_path: UpperCAmelCase_ = Dictionary.load(lowerCAmelCase__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq UpperCAmelCase_ = target_dict.pad_index UpperCAmelCase_ = target_dict.bos_index UpperCAmelCase_ = target_dict.eos_index UpperCAmelCase_ = len(target_dict.symbols ) UpperCAmelCase_ = os.path.join(lowerCAmelCase__ , "vocab.json" ) if not os.path.isdir(lowerCAmelCase__ ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowerCAmelCase__ ) ) return os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ ) UpperCAmelCase_ = target_dict.indices # fairseq has the <pad> and <s> switched UpperCAmelCase_ = 0 UpperCAmelCase_ = 1 with open(lowerCAmelCase__ , "w" , encoding="utf-8" ) as vocab_handle: json.dump(lowerCAmelCase__ , lowerCAmelCase__ ) UpperCAmelCase_ = WavaVecaCTCTokenizer( lowerCAmelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=lowerCAmelCase__ , ) UpperCAmelCase_ = True if config.feat_extract_norm == "layer" else False UpperCAmelCase_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , ) UpperCAmelCase_ = WavaVecaProcessor(feature_extractor=lowerCAmelCase__ , 
tokenizer=lowerCAmelCase__ ) processor.save_pretrained(lowerCAmelCase__ ) UpperCAmelCase_ = WavaVecaConformerForCTC(lowerCAmelCase__ ) else: UpperCAmelCase_ = WavaVecaConformerForPreTraining(lowerCAmelCase__ ) if is_finetuned: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: UpperCAmelCase_ = argparse.Namespace(task="audio_pretraining" ) UpperCAmelCase_ = fairseq.tasks.setup_task(lowerCAmelCase__ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCAmelCase__ ) UpperCAmelCase_ = model[0].eval() recursively_load_weights(lowerCAmelCase__ , lowerCAmelCase__ , not is_finetuned ) hf_wavavec.save_pretrained(lowerCAmelCase__ ) if __name__ == "__main__": lowerCamelCase = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) lowerCamelCase = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
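# Example invocation of the Wav2Vec2-Conformer converter above for a
# fine-tuned (CTC) checkpoint. The filename and paths are placeholders; the
# flags match the argparse block:
#
#   python convert_wav2vec2_conformer.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-conformer-ctc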
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase_ = { "configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"], "tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = ["BertTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "BertForMaskedLM", "BertForMultipleChoice", "BertForNextSentencePrediction", "BertForPreTraining", "BertForQuestionAnswering", "BertForSequenceClassification", "BertForTokenClassification", "BertLayer", "BertLMHeadModel", "BertModel", "BertPreTrainedModel", "load_tf_weights_in_bert", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFBertEmbeddings", "TFBertForMaskedLM", "TFBertForMultipleChoice", "TFBertForNextSentencePrediction", "TFBertForPreTraining", "TFBertForQuestionAnswering", "TFBertForSequenceClassification", "TFBertForTokenClassification", "TFBertLMHeadModel", "TFBertMainLayer", "TFBertModel", "TFBertPreTrainedModel", ] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = ["TFBertTokenizer"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "FlaxBertForCausalLM", "FlaxBertForMaskedLM", "FlaxBertForMultipleChoice", "FlaxBertForNextSentencePrediction", "FlaxBertForPreTraining", "FlaxBertForQuestionAnswering", "FlaxBertForSequenceClassification", "FlaxBertForTokenClassification", "FlaxBertModel", "FlaxBertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: 
pass else: from .tokenization_bert_tf import TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import ( FlaxBertForCausalLM, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
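# A rough sketch of what the lazy import structure above means for callers:
# importing transformers stays cheap, and a framework-specific submodule is
# only imported when one of its names is first accessed (assumes torch is
# installed):
#
#   from transformers import BertConfig   # light: only configuration_bert loads
#   from transformers import BertModel    # now the torch-backed modeling_bert loads
#   model = BertModel(BertConfig())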
from collections import namedtuple

from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def convert_volume(value: float, from_type: str, to_type: str) -> float:
    """Convert `value` from one volume unit to another via cubic metres."""
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r}  Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}.  Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    # `from_` converts the source unit to cubic metres; `to` converts cubic
    # metres to the target unit.
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to


if __name__ == "__main__":
    import doctest

    doctest.testmod()
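# A minimal usage sketch for `convert_volume` above (the name comes from the
# module in this file; the output is what the conversion formula yields):
#
#   >>> convert_volume(4.0, "cubicmeter", "litre")
#   4000.0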
"""simple docstring""" def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): """simple docstring""" if not (isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )): raise ValueError("longest_common_substring() takes two strings for inputs" ) UpperCamelCase = len(_SCREAMING_SNAKE_CASE ) UpperCamelCase = len(_SCREAMING_SNAKE_CASE ) UpperCamelCase = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )] UpperCamelCase = 0 UpperCamelCase = 0 for i in range(1 , texta_length + 1 ): for j in range(1 , texta_length + 1 ): if texta[i - 1] == texta[j - 1]: UpperCamelCase = 1 + dp[i - 1][j - 1] if dp[i][j] > ans_length: UpperCamelCase = i UpperCamelCase = dp[i][j] return texta[ans_index - ans_length : ans_index] if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ = { '''configuration_blenderbot''': [ '''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BlenderbotConfig''', '''BlenderbotOnnxConfig''', ], '''tokenization_blenderbot''': ['''BlenderbotTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''BlenderbotTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BlenderbotForCausalLM''', '''BlenderbotForConditionalGeneration''', '''BlenderbotModel''', '''BlenderbotPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''TFBlenderbotForConditionalGeneration''', '''TFBlenderbotModel''', '''TFBlenderbotPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''FlaxBlenderbotForConditionalGeneration''', '''FlaxBlenderbotModel''', '''FlaxBlenderbotPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring''' import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class snake_case__ ( UpperCamelCase): a_ = ["image_processor", "tokenizer"] a_ = "ChineseCLIPImageProcessor" a_ = ("BertTokenizer", "BertTokenizerFast") def __init__( self : List[Any] , _A : Dict=None , _A : List[Any]=None , **_A : str ) -> Any: UpperCAmelCase_ : Any = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , _A , ) UpperCAmelCase_ : Optional[Any] = kwargs.pop('''feature_extractor''' ) UpperCAmelCase_ : Optional[Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(_A , _A ) UpperCAmelCase_ : List[Any] = self.image_processor def __call__( self : Dict , _A : Optional[int]=None , _A : Dict=None , _A : Optional[Any]=None , **_A : Tuple ) -> Any: if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: UpperCAmelCase_ : Optional[Any] = self.tokenizer(_A , return_tensors=_A , **_A ) if images is not None: UpperCAmelCase_ : Dict = self.image_processor(_A , return_tensors=_A , **_A ) if text is not None and images is not None: UpperCAmelCase_ : Union[str, Any] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_A ) , tensor_type=_A ) def A ( self : List[Any] , *_A : Any , **_A : Optional[int] ) -> Union[str, Any]: return self.tokenizer.batch_decode(*_A , **_A ) def A ( self : Any , *_A : Union[str, Any] , **_A : Tuple ) -> List[Any]: return self.tokenizer.decode(*_A , **_A ) @property def A ( self : Tuple ) -> Tuple: UpperCAmelCase_ : Union[str, Any] = self.tokenizer.model_input_names UpperCAmelCase_ : Optional[int] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def A ( self : List[Any] ) -> List[str]: warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _A , ) return self.image_processor_class
'''simple docstring''' import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset _UpperCamelCase : str = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class snake_case__ ( nn.Module): def __init__( self : Optional[int] , _A : Tuple ) -> List[str]: super().__init__() UpperCAmelCase_ : Tuple = torchvision.models.resnetaaa(pretrained=_A ) UpperCAmelCase_ : Union[str, Any] = list(model.children() )[:-2] UpperCAmelCase_ : Union[str, Any] = nn.Sequential(*_A ) UpperCAmelCase_ : List[Any] = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def A ( self : str , _A : Optional[int] ) -> str: # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048 UpperCAmelCase_ : List[str] = self.pool(self.model(_A ) ) UpperCAmelCase_ : Tuple = torch.flatten(_A , start_dim=2 ) UpperCAmelCase_ : Union[str, Any] = out.transpose(1 , 2 ).contiguous() return out # BxNx2048 class snake_case__ ( UpperCamelCase): def __init__( self : Optional[int] , _A : int , _A : str , _A : int , _A : Dict , _A : int ) -> List[str]: UpperCAmelCase_ : Any = [json.loads(_A ) for l in open(_A )] UpperCAmelCase_ : Tuple = os.path.dirname(_A ) UpperCAmelCase_ : Any = tokenizer UpperCAmelCase_ : Optional[Any] = labels UpperCAmelCase_ : List[str] = len(_A ) UpperCAmelCase_ : int = max_seq_length UpperCAmelCase_ : str = transforms def __len__( self : Tuple ) -> Tuple: return len(self.data ) def __getitem__( self : Union[str, Any] , _A : Tuple ) -> List[Any]: UpperCAmelCase_ : Tuple = torch.LongTensor(self.tokenizer.encode(self.data[index]['''text'''] , add_special_tokens=_A ) ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = sentence[0], sentence[1:-1], sentence[-1] UpperCAmelCase_ : List[str] = sentence[: self.max_seq_length] UpperCAmelCase_ : List[str] = torch.zeros(self.n_classes ) UpperCAmelCase_ : Optional[int] = 1 UpperCAmelCase_ : Tuple = Image.open(os.path.join(self.data_dir , self.data[index]['''img'''] ) ).convert('''RGB''' ) UpperCAmelCase_ : List[Any] = self.transforms(_A ) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def A ( self : Tuple ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = Counter() for row in self.data: label_freqs.update(row['''label'''] ) return label_freqs def __UpperCAmelCase ( A : Tuple ) -> Union[str, Any]: UpperCAmelCase_ : int = [len(row['''sentence'''] ) for row in batch] UpperCAmelCase_ , UpperCAmelCase_ : Dict = len(A ), max(A ) UpperCAmelCase_ : Tuple = torch.zeros(A , A , dtype=torch.long ) UpperCAmelCase_ : Dict = torch.zeros(A , A , dtype=torch.long ) for i_batch, (input_row, length) in enumerate(zip(A , A ) ): UpperCAmelCase_ : int = input_row['''sentence'''] UpperCAmelCase_ : Optional[Any] = 1 UpperCAmelCase_ : Union[str, Any] = torch.stack([row['''image'''] for row in batch] ) UpperCAmelCase_ : Optional[int] = torch.stack([row['''label'''] for row in batch] ) UpperCAmelCase_ : Any = torch.stack([row['''image_start_token'''] for row in batch] ) UpperCAmelCase_ : int = torch.stack([row['''image_end_token'''] for row in batch] ) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def __UpperCAmelCase ( ) -> Union[str, Any]: return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", 
"Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir", ] def __UpperCAmelCase ( ) -> Union[str, Any]: return transforms.Compose( [ transforms.Resize(2_5_6 ), transforms.CenterCrop(2_2_4 ), transforms.ToTensor(), transforms.Normalize( mean=[0.46777044, 0.44531429, 0.40661017] , std=[0.12221994, 0.12145835, 0.14380469] , ), ] )
from __future__ import annotations


def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Return all positions on an n x n board a knight can reach from `position`."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check whether every cell of the board has been visited (is non-zero)."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(
    board: list[list[int]], pos: tuple[int, int], curr: int
) -> bool:
    """Extend the tour from `pos` by backtracking; `curr` is the current step count."""
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0  # undo the move and backtrack

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find an open knight's tour on an n x n board, or raise if none exists."""
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
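# Usage sketch: `open_knight_tour(5)` returns a 5x5 board whose entries are the
# step numbers of the knight's visits. The solver tries (0, 0) first, and an
# open tour starting from a corner exists on a 5x5 board, so the top-left cell
# holds step 1:
#
#   >>> board = open_knight_tour(5)
#   >>> len(board), board[0][0]
#   (5, 1)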
import re


def split_input(str_: str) -> list:
    """Split on any character that is not a letter, digit or whitespace, then
    split each chunk into words."""
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")


if __name__ == "__main__":
    __import__("doctest").testmod()
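# Usage sketch for the case converters above (function names come from the
# module in this file; outputs follow from split_input's whitespace splitting):
#
#   >>> to_pascal_case("hello world program")
#   'HelloWorldProgram'
#   >>> to_camel_case("hello world program")
#   'helloWorldProgram'
#   >>> to_snake_case("hello world program", upper=False)
#   'hello_world_program'
#   >>> to_kebab_case("hello world program", upper=True)
#   'HELLO-WORLD-PROGRAM'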
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_download, hf_hub_url from PIL import Image from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase = logging.get_logger(__name__) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> List[Any]: _lowercase : Optional[Any] = SwinConfig( embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['stage2', 'stage3', 'stage4'] , ) _lowercase : Any = DetaConfig( backbone_config=SCREAMING_SNAKE_CASE , num_queries=900 , encoder_ffn_dim=2_048 , decoder_ffn_dim=2_048 , num_feature_levels=5 , assign_first_stage=SCREAMING_SNAKE_CASE , with_box_refine=SCREAMING_SNAKE_CASE , two_stage=SCREAMING_SNAKE_CASE , ) # set labels _lowercase : int = 'huggingface/label-files' if "o365" in model_name: _lowercase : Any = 366 _lowercase : Dict = 'object365-id2label.json' else: _lowercase : str = 91 _lowercase : Union[str, Any] = 'coco-detection-id2label.json' _lowercase : Optional[int] = num_labels _lowercase : Dict = json.load(open(cached_download(hf_hub_url(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='dataset' ) ) , 'r' ) ) _lowercase : List[str] = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} _lowercase : Any = idalabel _lowercase : Union[str, Any] = {v: k for k, v in idalabel.items()} return config def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Union[str, Any]: _lowercase : Union[str, Any] = [] # stem # fmt: off rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') ) rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') ) rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') ) rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm1.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm1.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm2.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm2.bias""", 
F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") ) if i < 3: rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.reduction.weight""", F"""model.backbone.model.encoder.layers.{i}.downsample.reduction.weight""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.norm.weight""", F"""model.backbone.model.encoder.layers.{i}.downsample.norm.weight""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.norm.bias""", F"""model.backbone.model.encoder.layers.{i}.downsample.norm.bias""") ) rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') ) rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') ) rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') ) rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') ) rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') ) rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') ) # transformer encoder for i in range(config.encoder_layers ): rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight""", F"""model.encoder.layers.{i}.self_attn.sampling_offsets.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias""", F"""model.encoder.layers.{i}.self_attn.sampling_offsets.bias""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.attention_weights.weight""", F"""model.encoder.layers.{i}.self_attn.attention_weights.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.attention_weights.bias""", F"""model.encoder.layers.{i}.self_attn.attention_weights.bias""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.value_proj.weight""", F"""model.encoder.layers.{i}.self_attn.value_proj.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.value_proj.bias""", F"""model.encoder.layers.{i}.self_attn.value_proj.bias""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.output_proj.weight""", F"""model.encoder.layers.{i}.self_attn.output_proj.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.output_proj.bias""", F"""model.encoder.layers.{i}.self_attn.output_proj.bias""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.weight""", F"""model.encoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""model.encoder.layers.{i}.self_attn_layer_norm.bias""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""model.encoder.layers.{i}.fc1.weight""") ) 
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""model.encoder.layers.{i}.fc1.bias""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""model.encoder.layers.{i}.fc2.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""model.encoder.layers.{i}.fc2.bias""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""model.encoder.layers.{i}.final_layer_norm.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""model.encoder.layers.{i}.final_layer_norm.bias""") ) # transformer decoder for i in range(config.decoder_layers ): rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight""", F"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias""", F"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.attention_weights.weight""", F"""model.decoder.layers.{i}.encoder_attn.attention_weights.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.attention_weights.bias""", F"""model.decoder.layers.{i}.encoder_attn.attention_weights.bias""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.value_proj.weight""", F"""model.decoder.layers.{i}.encoder_attn.value_proj.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.value_proj.bias""", F"""model.decoder.layers.{i}.encoder_attn.value_proj.bias""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.output_proj.weight""", F"""model.decoder.layers.{i}.encoder_attn.output_proj.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.output_proj.bias""", F"""model.decoder.layers.{i}.encoder_attn.output_proj.bias""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.weight""", F"""model.decoder.layers.{i}.encoder_attn_layer_norm.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""model.decoder.layers.{i}.encoder_attn_layer_norm.bias""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""model.decoder.layers.{i}.self_attn.out_proj.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""model.decoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.norm2.weight""", F"""model.decoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.norm2.bias""", F"""model.decoder.layers.{i}.self_attn_layer_norm.bias""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""model.decoder.layers.{i}.fc1.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""model.decoder.layers.{i}.fc1.bias""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""model.decoder.layers.{i}.fc2.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""model.decoder.layers.{i}.fc2.bias""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""model.decoder.layers.{i}.final_layer_norm.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""model.decoder.layers.{i}.final_layer_norm.bias""") ) # fmt: on return rename_keys def __magic_name__ ( 
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]: _lowercase : Any = dct.pop(SCREAMING_SNAKE_CASE ) _lowercase : List[str] = val def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: _lowercase : Optional[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): _lowercase : Union[str, Any] = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) _lowercase : str = state_dict.pop(F"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight""" ) _lowercase : Optional[Any] = state_dict.pop(F"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict _lowercase : List[str] = in_proj_weight[:dim, :] _lowercase : List[str] = in_proj_bias[: dim] _lowercase : str = in_proj_weight[ dim : dim * 2, : ] _lowercase : Union[str, Any] = in_proj_bias[ dim : dim * 2 ] _lowercase : Dict = in_proj_weight[ -dim :, : ] _lowercase : Union[str, Any] = in_proj_bias[-dim :] # fmt: on def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]: # transformer decoder self-attention layers _lowercase : str = config.d_model for i in range(config.decoder_layers ): # read in weights + bias of input projection layer of self-attention _lowercase : Dict = state_dict.pop(F"""transformer.decoder.layers.{i}.self_attn.in_proj_weight""" ) _lowercase : str = state_dict.pop(F"""transformer.decoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict _lowercase : List[Any] = in_proj_weight[:hidden_size, :] _lowercase : Optional[Any] = in_proj_bias[:hidden_size] _lowercase : List[str] = in_proj_weight[ hidden_size : hidden_size * 2, : ] _lowercase : str = in_proj_bias[hidden_size : hidden_size * 2] _lowercase : str = in_proj_weight[-hidden_size:, :] _lowercase : Dict = in_proj_bias[-hidden_size:] def __magic_name__ ( ) -> Optional[int]: _lowercase : str = 'http://images.cocodataset.org/val2017/000000039769.jpg' _lowercase : List[Any] = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]: _lowercase : Dict = get_deta_config(SCREAMING_SNAKE_CASE ) # load original state dict if model_name == "deta-swin-large": _lowercase : int = hf_hub_download(repo_id='nielsr/deta-checkpoints' , filename='adet_swin_ft.pth' ) elif model_name == "deta-swin-large-o365": _lowercase : Dict = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365' , filename='deta_swin_pt_o365.pth' ) else: raise ValueError(F"""Model name {model_name} not supported""" ) _lowercase : str = torch.load(SCREAMING_SNAKE_CASE , map_location='cpu' )['model'] # original state dict for name, param in state_dict.items(): print(SCREAMING_SNAKE_CASE , param.shape ) # rename keys _lowercase : Tuple = create_rename_keys(SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) read_in_swin_q_k_v(SCREAMING_SNAKE_CASE , config.backbone_config ) read_in_decoder_q_k_v(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # fix some prefixes for key in state_dict.copy().keys(): if "transformer.decoder.class_embed" in key or 
"transformer.decoder.bbox_embed" in key: _lowercase : int = state_dict.pop(SCREAMING_SNAKE_CASE ) _lowercase : List[str] = val if "input_proj" in key: _lowercase : Any = state_dict.pop(SCREAMING_SNAKE_CASE ) _lowercase : int = val if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key: _lowercase : Union[str, Any] = state_dict.pop(SCREAMING_SNAKE_CASE ) _lowercase : Dict = val # finally, create HuggingFace model and load state dict _lowercase : Optional[Any] = DetaForObjectDetection(SCREAMING_SNAKE_CASE ) model.load_state_dict(SCREAMING_SNAKE_CASE ) model.eval() _lowercase : Tuple = 'cuda' if torch.cuda.is_available() else 'cpu' model.to(SCREAMING_SNAKE_CASE ) # load image processor _lowercase : Optional[int] = DetaImageProcessor(format='coco_detection' ) # verify our conversion on image _lowercase : int = prepare_img() _lowercase : Tuple = processor(images=SCREAMING_SNAKE_CASE , return_tensors='pt' ) _lowercase : Union[str, Any] = encoding['pixel_values'] _lowercase : Any = model(pixel_values.to(SCREAMING_SNAKE_CASE ) ) # verify logits print('Logits:' , outputs.logits[0, :3, :3] ) print('Boxes:' , outputs.pred_boxes[0, :3, :3] ) if model_name == "deta-swin-large": _lowercase : Tuple = torch.tensor( [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] ) _lowercase : Dict = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] ) elif model_name == "deta-swin-large-o365": _lowercase : Union[str, Any] = torch.tensor( [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] ) _lowercase : Dict = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] ) assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(SCREAMING_SNAKE_CASE ) , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(SCREAMING_SNAKE_CASE ) , atol=1E-4 ) print('Everything ok!' ) if pytorch_dump_folder_path: # Save model and processor logger.info(F"""Saving PyTorch model and processor to {pytorch_dump_folder_path}...""" ) Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE ) model.save_pretrained(SCREAMING_SNAKE_CASE ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) # Push to hub if push_to_hub: print('Pushing model and processor to hub...' ) model.push_to_hub(F"""jozhang97/{model_name}""" ) processor.push_to_hub(F"""jozhang97/{model_name}""" ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() parser.add_argument( "--model_name", type=str, default="deta-swin-large", choices=["deta-swin-large", "deta-swin-large-o365"], help="Name of the model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model.", ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) UpperCamelCase = parser.parse_args() convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer __a : Union[str, Any] = logging.get_logger(__name__) __a : List[str] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} __a : Dict = { "vocab_file": { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json", "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json", "roberta-large-openai-detector": ( "https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json" ), }, "merges_file": { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt", "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt", "roberta-large-openai-detector": ( "https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt" ), }, "tokenizer_file": { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json", "roberta-base-openai-detector": ( "https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json" ), "roberta-large-openai-detector": ( "https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json" ), }, } __a : List[str] = { "roberta-base": 512, "roberta-large": 512, "roberta-large-mnli": 512, "distilroberta-base": 512, "roberta-base-openai-detector": 512, "roberta-large-openai-detector": 512, } class __lowercase ( lowercase_ ): '''simple docstring''' SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"] SCREAMING_SNAKE_CASE = RobertaTokenizer def __init__( self : List[Any] , UpperCamelCase_ : Any=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Union[str, Any]="replace" , UpperCamelCase_ : Any="<s>" , UpperCamelCase_ : Tuple="</s>" , UpperCamelCase_ : Optional[Any]="</s>" , UpperCamelCase_ : Dict="<s>" , UpperCamelCase_ : int="<unk>" , UpperCamelCase_ : Optional[int]="<pad>" , UpperCamelCase_ : str="<mask>" , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : Optional[Any]=True , **UpperCamelCase_ : Union[str, Any] , ): """simple docstring""" super().__init__( UpperCamelCase_ , UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , errors=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , 
cls_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ , **UpperCamelCase_ , ) __A = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , UpperCamelCase_ ) != add_prefix_space: __A = getattr(UpperCamelCase_ , pre_tok_state.pop("""type""" ) ) __A = add_prefix_space __A = pre_tok_class(**UpperCamelCase_ ) __A = add_prefix_space __A = """post_processor""" __A = getattr(self.backend_tokenizer , UpperCamelCase_ , UpperCamelCase_ ) if tokenizer_component_instance: __A = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: __A = tuple(state["""sep"""] ) if "cls" in state: __A = tuple(state["""cls"""] ) __A = False if state.get("""add_prefix_space""" , UpperCamelCase_ ) != add_prefix_space: __A = add_prefix_space __A = True if state.get("""trim_offsets""" , UpperCamelCase_ ) != trim_offsets: __A = trim_offsets __A = True if changes_to_apply: __A = getattr(UpperCamelCase_ , state.pop("""type""" ) ) __A = component_class(**UpperCamelCase_ ) setattr(self.backend_tokenizer , UpperCamelCase_ , UpperCamelCase_ ) @property def lowerCAmelCase_ ( self : str ): """simple docstring""" if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""" ) return None return str(self._mask_token ) @mask_token.setter def lowerCAmelCase_ ( self : str , UpperCamelCase_ : Tuple ): """simple docstring""" __A = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else value __A = value def lowerCAmelCase_ ( self : Tuple , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : Dict ): """simple docstring""" __A = kwargs.get("""is_split_into_words""" , UpperCamelCase_ ) assert self.add_prefix_space or not is_split_into_words, ( F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*UpperCamelCase_ , **UpperCamelCase_ ) def lowerCAmelCase_ ( self : Union[str, Any] , *UpperCamelCase_ : Optional[int] , **UpperCamelCase_ : Tuple ): """simple docstring""" __A = kwargs.get("""is_split_into_words""" , UpperCamelCase_ ) assert self.add_prefix_space or not is_split_into_words, ( F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._encode_plus(*UpperCamelCase_ , **UpperCamelCase_ ) def lowerCAmelCase_ ( self : int , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ): """simple docstring""" __A = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ ) return tuple(UpperCamelCase_ ) def lowerCAmelCase_ ( self : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : int=None ): """simple docstring""" __A = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def lowerCAmelCase_ ( self : int , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): """simple docstring""" __A = [self.sep_token_id] __A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
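# Minimal usage sketch for the fast tokenizer above; "roberta-base" is one of
# the checkpoints listed in the vocab map:
#
#   tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
#   encoding = tokenizer("Hello world", return_tensors="pt")
#   print(encoding["input_ids"])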
637
0
"""simple docstring""" import numpy as np class __a : '''simple docstring''' def __init__( self ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = (0, 0) SCREAMING_SNAKE_CASE__ : Dict = None SCREAMING_SNAKE_CASE__ : List[str] = 0 SCREAMING_SNAKE_CASE__ : Optional[int] = 0 SCREAMING_SNAKE_CASE__ : int = 0 def __eq__( self , _a ) -> str: """simple docstring""" return self.position == cell.position def _a ( self ) -> Union[str, Any]: """simple docstring""" print(self.position ) class __a : '''simple docstring''' def __init__( self , _a=(5, 5) ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = np.zeros(_a ) SCREAMING_SNAKE_CASE__ : List[str] = world_size[0] SCREAMING_SNAKE_CASE__ : Any = world_size[1] def _a ( self ) -> Dict: """simple docstring""" print(self.w ) def _a ( self , _a ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = [ (-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1), ] SCREAMING_SNAKE_CASE__ : int = cell.position[0] SCREAMING_SNAKE_CASE__ : List[Any] = cell.position[1] SCREAMING_SNAKE_CASE__ : Any = [] for n in neughbour_cord: SCREAMING_SNAKE_CASE__ : Union[str, Any] = current_x + n[0] SCREAMING_SNAKE_CASE__ : Optional[Any] = current_y + n[1] if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit: SCREAMING_SNAKE_CASE__ : Optional[int] = Cell() SCREAMING_SNAKE_CASE__ : Dict = (x, y) SCREAMING_SNAKE_CASE__ : int = cell neighbours.append(_a ) return neighbours def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]: SCREAMING_SNAKE_CASE__ : Any = [] SCREAMING_SNAKE_CASE__ : Dict = [] _open.append(__lowerCAmelCase ) while _open: SCREAMING_SNAKE_CASE__ : Any = np.argmin([n.f for n in _open] ) SCREAMING_SNAKE_CASE__ : str = _open[min_f] _closed.append(_open.pop(__lowerCAmelCase ) ) if current == goal: break for n in world.get_neigbours(__lowerCAmelCase ): for c in _closed: if c == n: continue SCREAMING_SNAKE_CASE__ : Optional[Any] = current.g + 1 SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = n.position SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = goal.position SCREAMING_SNAKE_CASE__ : Tuple = (ya - ya) ** 2 + (xa - xa) ** 2 SCREAMING_SNAKE_CASE__ : Any = n.h + n.g for c in _open: if c == n and c.f < n.f: continue _open.append(__lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : str = [] while current.parent is not None: path.append(current.position ) SCREAMING_SNAKE_CASE__ : Tuple = current.parent path.append(current.position ) return path[::-1] if __name__ == "__main__": a :Optional[Any] = Gridworld() # Start position and goal a :str = Cell() a :Union[str, Any] = (0, 0) a :Dict = Cell() a :str = (4, 4) print(f'path from {start.position} to {goal.position}') a :List[str] = astar(world, start, goal) # Just for visual reasons. for i in s: a :str = 1 print(world.w)
12
"""simple docstring""" import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import evaluate import numpy as np from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") a :Union[str, Any] = logging.getLogger(__name__) @dataclass class __a : '''simple docstring''' _SCREAMING_SNAKE_CASE :Optional[int] = field( default=1_28 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) _SCREAMING_SNAKE_CASE :bool = field( default=UpperCamelCase_ , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""}) _SCREAMING_SNAKE_CASE :bool = field( default=UpperCamelCase_ , metadata={ """help""": ( """Whether to pad all samples to `max_seq_length`. """ """If False, will pad the samples dynamically when batching to the maximum length in the batch.""" ) } , ) _SCREAMING_SNAKE_CASE :Optional[int] = field( default=UpperCamelCase_ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) _SCREAMING_SNAKE_CASE :Optional[int] = field( default=UpperCamelCase_ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) _SCREAMING_SNAKE_CASE :Optional[int] = field( default=UpperCamelCase_ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of prediction examples to this """ """value if set.""" ) } , ) @dataclass class __a : '''simple docstring''' _SCREAMING_SNAKE_CASE :str = field( default=UpperCamelCase_ , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""}) _SCREAMING_SNAKE_CASE :str = field( default=UpperCamelCase_ , metadata={"""help""": """Evaluation language. 
Also train language if `train_language` is set to None."""}) _SCREAMING_SNAKE_CASE :Optional[str] = field( default=UpperCamelCase_ , metadata={"""help""": """Train language if it is different from the evaluation language."""}) _SCREAMING_SNAKE_CASE :Optional[str] = field( default=UpperCamelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""}) _SCREAMING_SNAKE_CASE :Optional[str] = field( default=UpperCamelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""}) _SCREAMING_SNAKE_CASE :Optional[str] = field( default=UpperCamelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) _SCREAMING_SNAKE_CASE :Optional[bool] = field( default=UpperCamelCase_ , metadata={"""help""": """arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"""} , ) _SCREAMING_SNAKE_CASE :bool = field( default=UpperCamelCase_ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , ) _SCREAMING_SNAKE_CASE :str = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) _SCREAMING_SNAKE_CASE :bool = field( default=UpperCamelCase_ , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) _SCREAMING_SNAKE_CASE :bool = field( default=UpperCamelCase_ , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , ) def _lowercase ( ) -> Union[str, Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. SCREAMING_SNAKE_CASE__ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_xnli""" , __lowerCAmelCase ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ : List[Any] = training_args.get_process_log_level() logger.setLevel(__lowerCAmelCase ) datasets.utils.logging.set_verbosity(__lowerCAmelCase ) transformers.utils.logging.set_verbosity(__lowerCAmelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. 
SCREAMING_SNAKE_CASE__ : Any = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: SCREAMING_SNAKE_CASE__ : Optional[Any] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Set seed before initializing model. set_seed(training_args.seed ) # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. # Downloading and loading xnli dataset from the hub. if training_args.do_train: if model_args.train_language is None: SCREAMING_SNAKE_CASE__ : Optional[int] = load_dataset( """xnli""" , model_args.language , split="""train""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: SCREAMING_SNAKE_CASE__ : str = load_dataset( """xnli""" , model_args.train_language , split="""train""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = train_dataset.features["""label"""].names if training_args.do_eval: SCREAMING_SNAKE_CASE__ : int = load_dataset( """xnli""" , model_args.language , split="""validation""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) SCREAMING_SNAKE_CASE__ : List[Any] = eval_dataset.features["""label"""].names if training_args.do_predict: SCREAMING_SNAKE_CASE__ : int = load_dataset( """xnli""" , model_args.language , split="""test""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) SCREAMING_SNAKE_CASE__ : Tuple = predict_dataset.features["""label"""].names # Labels SCREAMING_SNAKE_CASE__ : Any = len(__lowerCAmelCase ) # Load pretrained model and tokenizer # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
SCREAMING_SNAKE_CASE__ : Any = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__lowerCAmelCase , idalabel={str(__lowerCAmelCase ): label for i, label in enumerate(__lowerCAmelCase )} , labelaid={label: i for i, label in enumerate(__lowerCAmelCase )} , finetuning_task="""xnli""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) SCREAMING_SNAKE_CASE__ : str = AutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # Preprocessing the datasets # Padding strategy if data_args.pad_to_max_length: SCREAMING_SNAKE_CASE__ : str = """max_length""" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch SCREAMING_SNAKE_CASE__ : Optional[Any] = False def preprocess_function(__lowerCAmelCase ): # Tokenize the texts return tokenizer( examples["""premise"""] , examples["""hypothesis"""] , padding=__lowerCAmelCase , max_length=data_args.max_seq_length , truncation=__lowerCAmelCase , ) if training_args.do_train: if data_args.max_train_samples is not None: SCREAMING_SNAKE_CASE__ : Optional[Any] = min(len(__lowerCAmelCase ) , data_args.max_train_samples ) SCREAMING_SNAKE_CASE__ : str = train_dataset.select(range(__lowerCAmelCase ) ) with training_args.main_process_first(desc="""train dataset map pre-processing""" ): SCREAMING_SNAKE_CASE__ : List[str] = train_dataset.map( __lowerCAmelCase , batched=__lowerCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on train dataset""" , ) # Log a few random samples from the training set: for index in random.sample(range(len(__lowerCAmelCase ) ) , 3 ): logger.info(F'''Sample {index} of the training set: {train_dataset[index]}.''' ) if training_args.do_eval: if data_args.max_eval_samples is not None: SCREAMING_SNAKE_CASE__ : Any = min(len(__lowerCAmelCase ) , data_args.max_eval_samples ) SCREAMING_SNAKE_CASE__ : List[Any] = eval_dataset.select(range(__lowerCAmelCase ) ) with training_args.main_process_first(desc="""validation dataset map pre-processing""" ): SCREAMING_SNAKE_CASE__ : List[str] = eval_dataset.map( __lowerCAmelCase , batched=__lowerCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on validation dataset""" , ) if training_args.do_predict: if data_args.max_predict_samples is not None: SCREAMING_SNAKE_CASE__ : int = min(len(__lowerCAmelCase ) , data_args.max_predict_samples ) SCREAMING_SNAKE_CASE__ : List[Any] = predict_dataset.select(range(__lowerCAmelCase ) ) with training_args.main_process_first(desc="""prediction dataset map pre-processing""" ): SCREAMING_SNAKE_CASE__ : Tuple = predict_dataset.map( __lowerCAmelCase , batched=__lowerCAmelCase , load_from_cache_file=not data_args.overwrite_cache , 
desc="""Running tokenizer on prediction dataset""" , ) # Get the metric function SCREAMING_SNAKE_CASE__ : Optional[Any] = evaluate.load("""xnli""" ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(__lowerCAmelCase ): SCREAMING_SNAKE_CASE__ : Dict = p.predictions[0] if isinstance(p.predictions , __lowerCAmelCase ) else p.predictions SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.argmax(__lowerCAmelCase , axis=1 ) return metric.compute(predictions=__lowerCAmelCase , references=p.label_ids ) # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: SCREAMING_SNAKE_CASE__ : List[Any] = default_data_collator elif training_args.fpaa: SCREAMING_SNAKE_CASE__ : int = DataCollatorWithPadding(__lowerCAmelCase , pad_to_multiple_of=8 ) else: SCREAMING_SNAKE_CASE__ : Union[str, Any] = None # Initialize our Trainer SCREAMING_SNAKE_CASE__ : Union[str, Any] = Trainer( model=__lowerCAmelCase , args=__lowerCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__lowerCAmelCase , tokenizer=__lowerCAmelCase , data_collator=__lowerCAmelCase , ) # Training if training_args.do_train: SCREAMING_SNAKE_CASE__ : Dict = None if training_args.resume_from_checkpoint is not None: SCREAMING_SNAKE_CASE__ : Union[str, Any] = training_args.resume_from_checkpoint elif last_checkpoint is not None: SCREAMING_SNAKE_CASE__ : Union[str, Any] = last_checkpoint SCREAMING_SNAKE_CASE__ : str = trainer.train(resume_from_checkpoint=__lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : List[Any] = train_result.metrics SCREAMING_SNAKE_CASE__ : Optional[int] = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(__lowerCAmelCase ) ) SCREAMING_SNAKE_CASE__ : Dict = min(__lowerCAmelCase , len(__lowerCAmelCase ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("""train""" , __lowerCAmelCase ) trainer.save_metrics("""train""" , __lowerCAmelCase ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) SCREAMING_SNAKE_CASE__ : Any = trainer.evaluate(eval_dataset=__lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[Any] = min(__lowerCAmelCase , len(__lowerCAmelCase ) ) trainer.log_metrics("""eval""" , __lowerCAmelCase ) trainer.save_metrics("""eval""" , __lowerCAmelCase ) # Prediction if training_args.do_predict: logger.info("""*** Predict ***""" ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = trainer.predict(__lowerCAmelCase , metric_key_prefix="""predict""" ) SCREAMING_SNAKE_CASE__ : List[str] = ( data_args.max_predict_samples if data_args.max_predict_samples is not None else len(__lowerCAmelCase ) ) SCREAMING_SNAKE_CASE__ : int = min(__lowerCAmelCase , len(__lowerCAmelCase ) ) trainer.log_metrics("""predict""" , __lowerCAmelCase ) trainer.save_metrics("""predict""" , __lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : List[str] = np.argmax(__lowerCAmelCase , axis=1 ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.join(training_args.output_dir , """predictions.txt""" ) if trainer.is_world_process_zero(): with open(__lowerCAmelCase , 
"""w""" ) as writer: writer.write("""index\tprediction\n""" ) for index, item in enumerate(__lowerCAmelCase ): SCREAMING_SNAKE_CASE__ : Optional[int] = label_list[item] writer.write(F'''{index}\t{item}\n''' ) if __name__ == "__main__": main()
12
1
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class _a : """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=2 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=10 , _UpperCAmelCase=3 , _UpperCAmelCase=32 * 8 , _UpperCAmelCase=32 * 8 , _UpperCAmelCase=4 , _UpperCAmelCase=64 , ) -> List[Any]: UpperCamelCase_ = parent UpperCamelCase_ = batch_size UpperCamelCase_ = is_training UpperCamelCase_ = use_auxiliary_loss UpperCamelCase_ = num_queries UpperCamelCase_ = num_channels UpperCamelCase_ = min_size UpperCamelCase_ = max_size UpperCamelCase_ = num_labels UpperCamelCase_ = hidden_dim UpperCamelCase_ = hidden_dim def _UpperCAmelCase ( self ) -> List[str]: UpperCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( _UpperCAmelCase ) UpperCamelCase_ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_UpperCAmelCase ) UpperCamelCase_ = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_UpperCAmelCase ) > 0.5 ).float() UpperCamelCase_ = (torch.rand((self.batch_size, self.num_labels) , device=_UpperCAmelCase ) > 0.5).long() UpperCamelCase_ = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def _UpperCAmelCase ( self ) -> Optional[int]: UpperCamelCase_ = MaskaFormerConfig( hidden_size=self.hidden_dim , ) UpperCamelCase_ = self.num_queries UpperCamelCase_ = self.num_labels UpperCamelCase_ = [1, 1, 1, 1] UpperCamelCase_ = self.num_channels UpperCamelCase_ = 64 UpperCamelCase_ = 128 UpperCamelCase_ = self.hidden_dim UpperCamelCase_ = self.hidden_dim UpperCamelCase_ = self.hidden_dim return config def _UpperCAmelCase ( self ) -> Dict: UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.prepare_config_and_inputs() UpperCamelCase_ = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask} return config, inputs_dict def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]: UpperCamelCase_ = output.encoder_hidden_states UpperCamelCase_ = output.pixel_decoder_hidden_states UpperCamelCase_ = output.transformer_decoder_hidden_states self.parent.assertTrue(len(_UpperCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(_UpperCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(_UpperCAmelCase ) , config.decoder_layers ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ) -> Any: with torch.no_grad(): UpperCamelCase_ = MaskaFormerModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() UpperCamelCase_ = model(pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase ) UpperCamelCase_ = model(_UpperCAmelCase , output_hidden_states=_UpperCAmelCase ) 
self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(_UpperCAmelCase , _UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]: UpperCamelCase_ = MaskaFormerForUniversalSegmentation(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() def comm_check_on_output(_UpperCAmelCase ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): UpperCamelCase_ = model(pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase ) UpperCamelCase_ = model(_UpperCAmelCase ) comm_check_on_output(_UpperCAmelCase ) UpperCamelCase_ = model( pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase ) comm_check_on_output(_UpperCAmelCase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class _a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): """simple docstring""" A_ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () A_ = {"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {} A_ = False A_ = False A_ = False A_ = False def _UpperCAmelCase ( self ) -> Optional[Any]: UpperCamelCase_ = MaskaFormerModelTester(self ) UpperCamelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(_UpperCAmelCase , **_UpperCAmelCase , output_hidden_states=_UpperCAmelCase ) def _UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_UpperCAmelCase ) @unittest.skip(reason='Mask2Former does not use inputs_embeds' ) def _UpperCAmelCase ( self ) -> Any: pass @unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' ) def _UpperCAmelCase ( self ) -> Optional[int]: pass @unittest.skip(reason='Mask2Former is not a generative model' ) def _UpperCAmelCase ( self ) -> Any: pass @unittest.skip(reason='Mask2Former does not use token embeddings' ) def _UpperCAmelCase ( self ) -> Optional[Any]: pass @require_torch_multi_gpu @unittest.skip( reason='Mask2Former has some layers using `add_module` which doesn\'t 
work well with `nn.DataParallel`' ) def _UpperCAmelCase ( self ) -> int: pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def _UpperCAmelCase ( self ) -> str: pass def _UpperCAmelCase ( self ) -> Optional[int]: UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase_ = model_class(_UpperCAmelCase ) UpperCamelCase_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase_ = [*signature.parameters.keys()] UpperCamelCase_ = ['pixel_values'] self.assertListEqual(arg_names[:1] , _UpperCAmelCase ) @slow def _UpperCAmelCase ( self ) -> Tuple: for model_name in ["facebook/mask2former-swin-small-coco-instance"]: UpperCamelCase_ = MaskaFormerModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Dict: UpperCamelCase_ = (self.model_tester.min_size,) * 2 UpperCamelCase_ = { 'pixel_values': torch.randn((2, 3, *size) , device=_UpperCAmelCase ), 'mask_labels': torch.randn((2, 10, *size) , device=_UpperCAmelCase ), 'class_labels': torch.zeros(2 , 10 , device=_UpperCAmelCase ).long(), } UpperCamelCase_ = self.model_tester.get_config() UpperCamelCase_ = MaskaFormerForUniversalSegmentation(_UpperCAmelCase ).to(_UpperCAmelCase ) UpperCamelCase_ = model(**_UpperCAmelCase ) self.assertTrue(outputs.loss is not None ) def _UpperCAmelCase ( self ) -> str: UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(_UpperCAmelCase , **_UpperCAmelCase , output_hidden_states=_UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Optional[int]: UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase_ = model_class(_UpperCAmelCase ).to(_UpperCAmelCase ) UpperCamelCase_ = model(**_UpperCAmelCase , output_attentions=_UpperCAmelCase ) self.assertTrue(outputs.attentions is not None ) def _UpperCAmelCase ( self ) -> List[Any]: if not self.model_tester.is_training: return UpperCamelCase_ = self.all_model_classes[1] UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs() UpperCamelCase_ = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.train() UpperCamelCase_ = model(_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase ).loss loss.backward() def _UpperCAmelCase ( self ) -> int: UpperCamelCase_ = self.all_model_classes[1] UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs() UpperCamelCase_ = True UpperCamelCase_ = True UpperCamelCase_ = model_class(_UpperCAmelCase ).to(_UpperCAmelCase ) model.train() UpperCamelCase_ = model(_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase ) UpperCamelCase_ = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() UpperCamelCase_ = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() UpperCamelCase_ = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() UpperCamelCase_ = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=_UpperCAmelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) 
self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) snake_case__ : List[Any] = 1E-4 def _snake_case (): UpperCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') return image @require_vision @slow class _a ( unittest.TestCase ): """simple docstring""" @cached_property def _UpperCAmelCase ( self ) -> Optional[int]: return "facebook/mask2former-swin-small-coco-instance" @cached_property def _UpperCAmelCase ( self ) -> List[str]: return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None def _UpperCAmelCase ( self ) -> str: UpperCamelCase_ = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase ) UpperCamelCase_ = self.default_image_processor UpperCamelCase_ = prepare_img() UpperCamelCase_ = image_processor(_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase ) UpperCamelCase_ = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(_UpperCAmelCase , (1, 3, 384, 384) ) with torch.no_grad(): UpperCamelCase_ = model(**_UpperCAmelCase ) UpperCamelCase_ = torch.tensor( [[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(_UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) ) UpperCamelCase_ = torch.tensor( [[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(_UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) ) UpperCamelCase_ = torch.tensor( [[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(_UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) ) def _UpperCAmelCase ( self ) -> Optional[Any]: UpperCamelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase ).eval() UpperCamelCase_ = self.default_image_processor UpperCamelCase_ = prepare_img() UpperCamelCase_ = image_processor(_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase ) UpperCamelCase_ = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(_UpperCAmelCase , (1, 3, 384, 384) ) with torch.no_grad(): UpperCamelCase_ = model(**_UpperCAmelCase ) # masks_queries_logits UpperCamelCase_ = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ) UpperCamelCase_ = [ [-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1], [-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1], [-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5], ] UpperCamelCase_ = torch.tensor(_UpperCAmelCase ).to(_UpperCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) ) # class_queries_logits UpperCamelCase_ = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) ) UpperCamelCase_ = torch.tensor( [ [1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2], 
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3], [0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5], ] ).to(_UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) ) def _UpperCAmelCase ( self ) -> Dict: UpperCamelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase ).eval() UpperCamelCase_ = self.default_image_processor UpperCamelCase_ = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , ) UpperCamelCase_ = inputs['pixel_values'].to(_UpperCAmelCase ) UpperCamelCase_ = [el.to(_UpperCAmelCase ) for el in inputs['mask_labels']] UpperCamelCase_ = [el.to(_UpperCAmelCase ) for el in inputs['class_labels']] with torch.no_grad(): UpperCamelCase_ = model(**_UpperCAmelCase ) self.assertTrue(outputs.loss is not None )
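# The "expected slice" assertion idiom used by the slow integration tests
# above, in isolation: compare a small corner of a model output against
# hard-coded reference values within an absolute tolerance. The tensor values
# here are made up for illustration; only the pattern is real.
import torch

TOLERANCE = 1e-4


def matches_expected_slice(output: torch.Tensor, expected: torch.Tensor) -> bool:
    return torch.allclose(output[:3, :3], expected, atol=TOLERANCE)


if __name__ == "__main__":
    out = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])
    expected = out + 5e-5  # within atol, so the check passes
    print(matches_expected_slice(out, expected))  # True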
23
def solution(limit: int = 1000000) -> int:
    """Sum Euler's totient phi(n) over 2 <= n <= limit.

    This counts the reduced proper fractions with denominator <= limit
    (Project Euler problem 72).
    """
    # Sieve the primes up to `limit`.
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    # phi(n) = n * prod(1 - 1/p) over the distinct prime factors p of n.
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"""{solution() = }""")
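# A brute-force cross-check for the sieve above: by definition
# phi(n) = |{1 <= k < n : gcd(n, k) == 1}|, so the fraction count is
# sum(phi(n) for n in 2..limit). Far too slow for limit = 10**6, but handy
# for verifying small cases against solution().
from math import gcd


def phi_sum_naive(limit: int) -> int:
    return sum(
        sum(1 for k in range(1, n) if gcd(n, k) == 1)
        for n in range(2, limit + 1)
    )


if __name__ == "__main__":
    assert phi_sum_naive(8) == 21  # phi(2..8) = 1+2+2+4+2+6+4, matching solution(8)
    print(phi_sum_naive(8))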
358
0
import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging __a = '\\n\n' __a = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n' __a = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase__( datasets.Metric ): """simple docstring""" def _lowercase ( self : List[str] ) -> Union[str, Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''input_texts''': datasets.Value('''string''' ), } ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , ) def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int = 1_6 , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Optional[int]=None ) -> str: if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": lowercase_ = '''cuda''' else: lowercase_ = '''cuda''' if torch.cuda.is_available() else '''cpu''' lowercase_ = AutoModelForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE_ ) lowercase_ = model.to(SCREAMING_SNAKE_CASE_ ) lowercase_ = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: lowercase_ = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(SCREAMING_SNAKE_CASE_ ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" lowercase_ = model.config.max_length - 1 else: lowercase_ = model.config.max_length lowercase_ = tokenizer( SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , return_attention_mask=SCREAMING_SNAKE_CASE_ , ).to(SCREAMING_SNAKE_CASE_ ) lowercase_ = encodings['''input_ids'''] lowercase_ = encodings['''attention_mask'''] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." lowercase_ = [] lowercase_ = CrossEntropyLoss(reduction='''none''' ) for start_index in logging.tqdm(range(0 , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) ): lowercase_ = min(start_index + batch_size , len(SCREAMING_SNAKE_CASE_ ) ) lowercase_ = encoded_texts[start_index:end_index] lowercase_ = attn_masks[start_index:end_index] if add_start_token: lowercase_ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(SCREAMING_SNAKE_CASE_ ) lowercase_ = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) lowercase_ = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(SCREAMING_SNAKE_CASE_ ), attn_mask] , dim=1 ) lowercase_ = encoded_batch with torch.no_grad(): lowercase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).logits lowercase_ = out_logits[..., :-1, :].contiguous() lowercase_ = labels[..., 1:].contiguous() lowercase_ = attn_mask[..., 1:].contiguous() lowercase_ = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , SCREAMING_SNAKE_CASE_ ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(SCREAMING_SNAKE_CASE_ )}
708
def catalan_numbers(upper_limit: int) -> "list[int]":
    """Return the Catalan numbers C(0)..C(upper_limit) via dynamic programming."""
    if upper_limit < 0:
        raise ValueError('Limit for the Catalan sequence must be ≥ 0')

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


if __name__ == "__main__":
    print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
    print('\n*** Enter -1 at any time to quit ***')
    print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print('\n********* Goodbye!! ************')
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print('Try another upper limit for the sequence: ', end='')
    except (NameError, ValueError):
        print('\n********* Invalid input, goodbye! ************\n')

    import doctest

    doctest.testmod()
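# A closed-form cross-check for the DP above: C(n) = binom(2n, n) / (n + 1),
# available directly via math.comb (Python 3.8+).
from math import comb


def catalan_closed_form(n: int) -> int:
    return comb(2 * n, n) // (n + 1)


if __name__ == "__main__":
    assert [catalan_closed_form(i) for i in range(7)] == [1, 1, 2, 5, 14, 42, 132]
    print([catalan_closed_form(i) for i in range(7)])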
409
0
"""simple docstring""" import math from enum import Enum from typing import Optional, Union from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from .utils import logging __A = logging.get_logger(__name__) class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Dict = "linear" _UpperCAmelCase :int = "cosine" _UpperCAmelCase :Union[str, Any] = "cosine_with_restarts" _UpperCAmelCase :List[str] = "polynomial" _UpperCAmelCase :List[str] = "constant" _UpperCAmelCase :Optional[Any] = "constant_with_warmup" _UpperCAmelCase :int = "piecewise_constant" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase = -1 ) -> str: return LambdaLR(__UpperCAmelCase , lambda __UpperCAmelCase : 1 , last_epoch=__UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = -1 ) -> Any: def lr_lambda(__UpperCAmelCase ): if current_step < num_warmup_steps: return float(__UpperCAmelCase ) / float(max(1.0 , __UpperCAmelCase ) ) return 1.0 return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , last_epoch=__UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = -1 ) -> int: lowercase__: List[Any] = {} lowercase__: Dict = step_rules.split(''',''' ) for rule_str in rule_list[:-1]: lowercase__, lowercase__: Union[str, Any] = rule_str.split(''':''' ) lowercase__: Optional[int] = int(__UpperCAmelCase ) lowercase__: List[str] = float(__UpperCAmelCase ) lowercase__: Tuple = value lowercase__: List[str] = float(rule_list[-1] ) def create_rules_function(__UpperCAmelCase , __UpperCAmelCase ): def rule_func(__UpperCAmelCase ) -> float: lowercase__: Tuple = sorted(rules_dict.keys() ) for i, sorted_step in enumerate(__UpperCAmelCase ): if steps < sorted_step: return rules_dict[sorted_steps[i]] return last_lr_multiple return rule_func lowercase__: Optional[Any] = create_rules_function(__UpperCAmelCase , __UpperCAmelCase ) return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , last_epoch=__UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=-1 ) -> Optional[Any]: def lr_lambda(__UpperCAmelCase ): if current_step < num_warmup_steps: return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) ) return max( 0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) ) return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0.5 , __UpperCAmelCase = -1 ) -> str: def lr_lambda(__UpperCAmelCase ): if current_step < num_warmup_steps: return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) ) lowercase__: str = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(__UpperCAmelCase ) * 2.0 * progress )) ) return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 1 , __UpperCAmelCase = -1 ) -> Tuple: def lr_lambda(__UpperCAmelCase ): if current_step < num_warmup_steps: return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) ) lowercase__: int = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) if progress >= 1.0: return 0.0 return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * 
((float(__UpperCAmelCase ) * progress) % 1.0) )) ) return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1e-7 , __UpperCAmelCase=1.0 , __UpperCAmelCase=-1 ) -> Optional[int]: lowercase__: Tuple = optimizer.defaults['''lr'''] if not (lr_init > lr_end): raise ValueError(F"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" ) def lr_lambda(__UpperCAmelCase ): if current_step < num_warmup_steps: return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) ) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: lowercase__: Any = lr_init - lr_end lowercase__: int = num_training_steps - num_warmup_steps lowercase__: List[str] = 1 - (current_step - num_warmup_steps) / decay_steps lowercase__: Union[str, Any] = lr_range * pct_remaining**power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) __A = { SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule, } def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = 1.0 , __UpperCAmelCase = -1 , ) -> str: lowercase__: int = SchedulerType(__UpperCAmelCase ) lowercase__: Tuple = TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(__UpperCAmelCase , last_epoch=__UpperCAmelCase ) if name == SchedulerType.PIECEWISE_CONSTANT: return schedule_func(__UpperCAmelCase , step_rules=__UpperCAmelCase , last_epoch=__UpperCAmelCase ) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" ) if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , last_epoch=__UpperCAmelCase ) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" ) if name == SchedulerType.COSINE_WITH_RESTARTS: return schedule_func( __UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , num_cycles=__UpperCAmelCase , last_epoch=__UpperCAmelCase , ) if name == SchedulerType.POLYNOMIAL: return schedule_func( __UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , power=__UpperCAmelCase , last_epoch=__UpperCAmelCase , ) return schedule_func( __UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
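# A minimal usage sketch for the linear-warmup schedule defined above: the
# LambdaLR multiplier ramps 0 -> 1 over num_warmup_steps, then decays
# linearly to 0 at num_training_steps. A single dummy parameter keeps the
# sketch self-contained; the helper name is an editorial stand-in for
# get_linear_schedule_with_warmup.
import torch
from torch.optim import SGD
from torch.optim.lr_scheduler import LambdaLR


def linear_schedule(num_warmup_steps: int, num_training_steps: int):
    def lr_lambda(step: int) -> float:
        if step < num_warmup_steps:
            return step / max(1, num_warmup_steps)
        return max(
            0.0,
            (num_training_steps - step) / max(1, num_training_steps - num_warmup_steps),
        )

    return lr_lambda


if __name__ == "__main__":
    optimizer = SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.1)
    scheduler = LambdaLR(optimizer, linear_schedule(2, 10))
    lrs = []
    for _ in range(10):
        lrs.append(optimizer.param_groups[0]["lr"])
        optimizer.step()
        scheduler.step()
    print([round(lr, 3) for lr in lrs])  # [0.0, 0.05, 0.1, 0.088, ...]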
586
"""simple docstring""" import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor __A = logging.get_logger(__name__) class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ): warnings.warn( '''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use YolosImageProcessor instead.''' , _UpperCAmelCase , ) super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
586
1
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer


VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt',
    },
    'tokenizer_file': {
        'unc-nlp/lxmert-base-uncased': (
            'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'unc-nlp/lxmert-base-uncased': 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    'unc-nlp/lxmert-base-uncased': {'do_lower_case': True},
}


class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token='[UNK]',
        sep_token='[SEP]',
        pad_token='[PAD]',
        cls_token='[CLS]',
        mask_token='[MASK]',
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
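# A plain-Python sketch of the segment-id layout the method above produces
# for BERT/LXMERT-style pair inputs: [CLS] A [SEP] is segment 0 and B [SEP]
# is segment 1.
def token_type_ids(ids_a, ids_b=None, cls_len=1, sep_len=1):
    first_segment = cls_len + len(ids_a) + sep_len
    if ids_b is None:
        return [0] * first_segment
    return [0] * first_segment + [1] * (len(ids_b) + sep_len)


if __name__ == "__main__":
    assert token_type_ids([5, 6]) == [0, 0, 0, 0]
    assert token_type_ids([5, 6], [7]) == [0, 0, 0, 0, 1, 1]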
314
import argparse import torch from transformers import GPTaLMHeadModel, RobertaForMaskedLM if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser( description=( '''Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned''' ''' Distillation''' ) ) parser.add_argument('''--model_type''', default='''roberta''', choices=['''roberta''', '''gpt2''']) parser.add_argument('''--model_name''', default='''roberta-large''', type=str) parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_roberta_048131723.pth''', type=str) parser.add_argument('''--vocab_transform''', action='''store_true''') __magic_name__ = parser.parse_args() if args.model_type == "roberta": __magic_name__ = RobertaForMaskedLM.from_pretrained(args.model_name) __magic_name__ = '''roberta''' elif args.model_type == "gpt2": __magic_name__ = GPTaLMHeadModel.from_pretrained(args.model_name) __magic_name__ = '''transformer''' __magic_name__ = model.state_dict() __magic_name__ = {} # Embeddings # if args.model_type == "gpt2": for param_name in ["wte.weight", "wpe.weight"]: __magic_name__ = state_dict[F"""{prefix}.{param_name}"""] else: for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]: __magic_name__ = F"""{prefix}.embeddings.{w}.weight""" __magic_name__ = state_dict[param_name] for w in ["weight", "bias"]: __magic_name__ = F"""{prefix}.embeddings.LayerNorm.{w}""" __magic_name__ = state_dict[param_name] # Transformer Blocks # __magic_name__ = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: if args.model_type == "gpt2": for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]: for w in ["weight", "bias"]: __magic_name__ = state_dict[ F"""{prefix}.h.{teacher_idx}.{layer}.{w}""" ] __magic_name__ = state_dict[F"""{prefix}.h.{teacher_idx}.attn.bias"""] else: for layer in [ "attention.self.query", "attention.self.key", "attention.self.value", "attention.output.dense", "attention.output.LayerNorm", "intermediate.dense", "output.dense", "output.LayerNorm", ]: for w in ["weight", "bias"]: __magic_name__ = state_dict[ F"""{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}""" ] std_idx += 1 # Language Modeling Head ###s if args.model_type == "roberta": for layer in ["lm_head.decoder.weight", "lm_head.bias"]: __magic_name__ = state_dict[F"""{layer}"""] if args.vocab_transform: for w in ["weight", "bias"]: __magic_name__ = state_dict[F"""lm_head.dense.{w}"""] __magic_name__ = state_dict[F"""lm_head.layer_norm.{w}"""] elif args.model_type == "gpt2": for w in ["weight", "bias"]: __magic_name__ = state_dict[F"""{prefix}.ln_f.{w}"""] __magic_name__ = state_dict['''lm_head.weight'''] print(F"""N layers selected for distillation: {std_idx}""") print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""") print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""") torch.save(compressed_sd, args.dump_checkpoint)
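# A hedged sketch of the extraction pattern in the script above: copy a fixed
# subset of teacher layers into a student state dict, renumbering them
# consecutively (std_idx). Plain dict values stand in for tensors, and the
# key prefix is illustrative, not the exact keys the original script used.
def extract_layers(teacher_sd, teacher_layers, prefix="encoder.layer"):
    student_sd, std_idx = {}, 0
    for teacher_idx in teacher_layers:
        src = f"{prefix}.{teacher_idx}."
        dst = f"{prefix}.{std_idx}."
        for key, value in teacher_sd.items():
            if key.startswith(src):
                student_sd[dst + key[len(src):]] = value
        std_idx += 1
    return student_sd


if __name__ == "__main__":
    teacher = {f"encoder.layer.{i}.weight": i for i in range(12)}
    student = extract_layers(teacher, [0, 2, 4, 7, 9, 11])
    print(student)  # 6 teacher layers renumbered 0..5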
314
1
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_focalnet'] = [
        'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'FocalNetForImageClassification',
        'FocalNetForMaskedImageModeling',
        'FocalNetBackbone',
        'FocalNetModel',
        'FocalNetPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
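# A minimal sketch of the lazy-import mechanism _LazyModule provides: defer
# the real import until an attribute is first accessed, here via module-level
# __getattr__ (PEP 562). This illustrates the idea only; it is not the actual
# transformers implementation.
import importlib

_IMPORT_STRUCTURE = {"json": ["dumps", "loads"]}  # submodule -> public names
_NAME_TO_MODULE = {
    name: module for module, names in _IMPORT_STRUCTURE.items() for name in names
}


def __getattr__(name):
    if name in _NAME_TO_MODULE:
        module = importlib.import_module(_NAME_TO_MODULE[name])
        return getattr(module, name)
    raise AttributeError(name)


if __name__ == "__main__":
    print(__getattr__("dumps")({"lazy": True}))  # json is imported only now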
30
'''simple docstring''' import importlib.util import json import os import warnings from dataclasses import dataclass, field import torch from ..training_args import TrainingArguments from ..utils import cached_property, is_sagemaker_dp_enabled, logging lowerCAmelCase :Any = logging.get_logger(__name__) def lowerCamelCase ( ): """simple docstring""" __magic_name__ : Union[str, Any] = os.getenv('SM_HP_MP_PARAMETERS' , '{}' ) try: # Parse it and check the field "partitions" is included, it is required for model parallel. __magic_name__ : Dict = json.loads(lowerCAmelCase ) if "partitions" not in smp_options: return False except json.JSONDecodeError: return False # Get the sagemaker specific framework parameters from mpi_options variable. __magic_name__ : List[Any] = os.getenv('SM_FRAMEWORK_PARAMS' , '{}' ) try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". __magic_name__ : List[Any] = json.loads(lowerCAmelCase ) if not mpi_options.get('sagemaker_mpi_enabled' , lowerCAmelCase ): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return importlib.util.find_spec('smdistributed' ) is not None if is_sagemaker_model_parallel_available(): import smdistributed.modelparallel.torch as smp smp.init() @dataclass class _lowerCamelCase ( lowercase__ ): '''simple docstring''' A_ : str = field( default="""""" , metadata={"""help""": """Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"""} , ) def __lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]: super().__post_init__() warnings.warn( '`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use ' '`TrainingArguments` instead.' , _A , ) @cached_property def __lowerCAmelCase ( self : Dict ) -> "torch.device": logger.info('PyTorch: setting up devices' ) if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1: logger.warning( 'torch.distributed process group is initialized, but local_rank == -1. ' 'In order to use Torch DDP, launch your script with `python -m torch.distributed.launch' ) if self.no_cuda: __magic_name__ : Any = torch.device('cpu' ) __magic_name__ : List[str] = 0 elif is_sagemaker_model_parallel_available(): __magic_name__ : Any = smp.local_rank() __magic_name__ : List[Any] = torch.device('cuda' , _A ) __magic_name__ : List[str] = 1 elif is_sagemaker_dp_enabled(): import smdistributed.dataparallel.torch.torch_smddp # noqa: F401 torch.distributed.init_process_group(backend='smddp' , timeout=self.ddp_timeout_delta ) __magic_name__ : Optional[Any] = int(os.getenv('SMDATAPARALLEL_LOCAL_RANK' ) ) __magic_name__ : Dict = torch.device('cuda' , self.local_rank ) __magic_name__ : int = 1 elif self.local_rank == -1: # if n_gpu is > 1 we'll use nn.DataParallel. # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0` # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will # trigger an error that a device index is missing. Index 0 takes into account the # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0` # will use the first GPU in that env, i.e. GPU#1 __magic_name__ : Union[str, Any] = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' ) # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at # the default value. __magic_name__ : str = torch.cuda.device_count() else: # Here, we'll use torch.distributed. 
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend='nccl' , timeout=self.ddp_timeout_delta ) __magic_name__ : List[str] = torch.device('cuda' , self.local_rank ) __magic_name__ : Union[str, Any] = 1 if device.type == "cuda": torch.cuda.set_device(_A ) return device @property def __lowerCAmelCase ( self : Tuple ) -> int: if is_sagemaker_model_parallel_available(): return smp.dp_size() return super().world_size @property def __lowerCAmelCase ( self : Optional[int] ) -> Dict: return not is_sagemaker_model_parallel_available() @property def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]: return False
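# A minimal, self-contained sketch of the device-selection precedence the
# `_setup_devices` property above implements: explicit CPU first, then the
# single-process default, then per-rank pinning for distributed launches.
# The helper name `select_device` is illustrative, not part of the
# Transformers API, and the distributed branch assumes a CUDA launcher has
# already set `local_rank`.
import torch

def select_device(no_cuda: bool, local_rank: int) -> torch.device:
    if no_cuda:
        # CPU requested explicitly -> ignore any accelerators.
        return torch.device("cpu")
    if local_rank == -1:
        # Single-process case: take the first visible GPU if there is one.
        return torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Distributed case: each process pins itself to its local GPU index.
    device = torch.device("cuda", local_rank)
    torch.cuda.set_device(device)
    return device

# e.g. select_device(no_cuda=True, local_rank=-1) -> device(type='cpu')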
561
0
import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated a_ = collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""]) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ a_ = """https://storage.googleapis.com/cvdf-datasets/mnist/""" def a__ ( _UpperCamelCase : Optional[Any] ): __lowerCamelCase = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' ) return numpy.frombuffer(bytestream.read(4 ) ,dtype=_UpperCamelCase )[0] @deprecated(_UpperCamelCase ,'''Please use tf.data to implement this functionality.''' ) def a__ ( _UpperCamelCase : str ): print('''Extracting''' ,f.name ) with gzip.GzipFile(fileobj=_UpperCamelCase ) as bytestream: __lowerCamelCase = _readaa(_UpperCamelCase ) if magic != 20_51: raise ValueError( '''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) ) __lowerCamelCase = _readaa(_UpperCamelCase ) __lowerCamelCase = _readaa(_UpperCamelCase ) __lowerCamelCase = _readaa(_UpperCamelCase ) __lowerCamelCase = bytestream.read(rows * cols * num_images ) __lowerCamelCase = numpy.frombuffer(_UpperCamelCase ,dtype=numpy.uinta ) __lowerCamelCase = data.reshape(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,1 ) return data @deprecated(_UpperCamelCase ,'''Please use tf.one_hot on tensors.''' ) def a__ ( _UpperCamelCase : Dict ,_UpperCamelCase : Union[str, Any] ): __lowerCamelCase = labels_dense.shape[0] __lowerCamelCase = numpy.arange(_UpperCamelCase ) * num_classes __lowerCamelCase = numpy.zeros((num_labels, num_classes) ) __lowerCamelCase = 1 return labels_one_hot @deprecated(_UpperCamelCase ,'''Please use tf.data to implement this functionality.''' ) def a__ ( _UpperCamelCase : Dict ,_UpperCamelCase : Dict=False ,_UpperCamelCase : Any=10 ): print('''Extracting''' ,f.name ) with gzip.GzipFile(fileobj=_UpperCamelCase ) as bytestream: __lowerCamelCase = _readaa(_UpperCamelCase ) if magic != 20_49: raise ValueError( '''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) ) __lowerCamelCase = _readaa(_UpperCamelCase ) __lowerCamelCase = bytestream.read(_UpperCamelCase ) __lowerCamelCase = numpy.frombuffer(_UpperCamelCase ,dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(_UpperCamelCase ,_UpperCamelCase ) return labels class __lowerCAmelCase : @deprecated( __UpperCAmelCase , '''Please use alternatives such as official/mnist/_DataSet.py''' ''' from tensorflow/models.''' , ) def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=dtypes.floataa , __UpperCAmelCase=True , __UpperCAmelCase=None , ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = random_seed.get_seed(__UpperCAmelCase ) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda ) __lowerCamelCase = dtypes.as_dtype(__UpperCAmelCase ).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype ) if fake_data: __lowerCamelCase = 10000 __lowerCamelCase = one_hot else: assert ( images.shape[0] == labels.shape[0] ), F"""images.shape: {images.shape} labels.shape: {labels.shape}""" __lowerCamelCase = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 
__lowerCamelCase = images.reshape( images.shape[0] , images.shape[1] * images.shape[2] ) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. __lowerCamelCase = images.astype(numpy.floataa ) __lowerCamelCase = numpy.multiply(__UpperCAmelCase , 1.0 / 255.0 ) __lowerCamelCase = images __lowerCamelCase = labels __lowerCamelCase = 0 __lowerCamelCase = 0 @property def lowerCamelCase ( self ): '''simple docstring''' return self._images @property def lowerCamelCase ( self ): '''simple docstring''' return self._labels @property def lowerCamelCase ( self ): '''simple docstring''' return self._num_examples @property def lowerCamelCase ( self ): '''simple docstring''' return self._epochs_completed def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True ): '''simple docstring''' if fake_data: __lowerCamelCase = [1] * 784 __lowerCamelCase = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(__UpperCAmelCase )], [fake_label for _ in range(__UpperCAmelCase )], ) __lowerCamelCase = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: __lowerCamelCase = numpy.arange(self._num_examples ) numpy.random.shuffle(__UpperCAmelCase ) __lowerCamelCase = self.images[perma] __lowerCamelCase = self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch __lowerCamelCase = self._num_examples - start __lowerCamelCase = self._images[start : self._num_examples] __lowerCamelCase = self._labels[start : self._num_examples] # Shuffle the data if shuffle: __lowerCamelCase = numpy.arange(self._num_examples ) numpy.random.shuffle(__UpperCAmelCase ) __lowerCamelCase = self.images[perm] __lowerCamelCase = self.labels[perm] # Start next epoch __lowerCamelCase = 0 __lowerCamelCase = batch_size - rest_num_examples __lowerCamelCase = self._index_in_epoch __lowerCamelCase = self._images[start:end] __lowerCamelCase = self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) , axis=0 ), numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ), ) else: self._index_in_epoch += batch_size __lowerCamelCase = self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(_UpperCamelCase ,'''Please write your own downloading logic.''' ) def a__ ( _UpperCamelCase : Dict ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Optional[Any] ): if not gfile.Exists(_UpperCamelCase ): gfile.MakeDirs(_UpperCamelCase ) __lowerCamelCase = os.path.join(_UpperCamelCase ,_UpperCamelCase ) if not gfile.Exists(_UpperCamelCase ): urllib.request.urlretrieve(_UpperCamelCase ,_UpperCamelCase ) # noqa: S310 with gfile.GFile(_UpperCamelCase ) as f: __lowerCamelCase = f.size() print('''Successfully downloaded''' ,_UpperCamelCase ,_UpperCamelCase ,'''bytes.''' ) return filepath @deprecated( _UpperCamelCase ,'''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' ) def a__ ( _UpperCamelCase : List[str] ,_UpperCamelCase : List[Any]=False ,_UpperCamelCase : Tuple=False ,_UpperCamelCase : List[str]=dtypes.floataa ,_UpperCamelCase : Tuple=True ,_UpperCamelCase : Tuple=50_00 ,_UpperCamelCase : str=None ,_UpperCamelCase : Optional[Any]=DEFAULT_SOURCE_URL ,): if fake_data: def fake(): return _DataSet( [] ,[] ,fake_data=_UpperCamelCase ,one_hot=_UpperCamelCase ,dtype=_UpperCamelCase ,seed=_UpperCamelCase ) __lowerCamelCase = fake() __lowerCamelCase = 
fake() __lowerCamelCase = fake() return _Datasets(train=_UpperCamelCase ,validation=_UpperCamelCase ,test=_UpperCamelCase ) if not source_url: # empty string check __lowerCamelCase = DEFAULT_SOURCE_URL __lowerCamelCase = '''train-images-idx3-ubyte.gz''' __lowerCamelCase = '''train-labels-idx1-ubyte.gz''' __lowerCamelCase = '''t10k-images-idx3-ubyte.gz''' __lowerCamelCase = '''t10k-labels-idx1-ubyte.gz''' __lowerCamelCase = _maybe_download( _UpperCamelCase ,_UpperCamelCase ,source_url + train_images_file ) with gfile.Open(_UpperCamelCase ,'''rb''' ) as f: __lowerCamelCase = _extract_images(_UpperCamelCase ) __lowerCamelCase = _maybe_download( _UpperCamelCase ,_UpperCamelCase ,source_url + train_labels_file ) with gfile.Open(_UpperCamelCase ,'''rb''' ) as f: __lowerCamelCase = _extract_labels(_UpperCamelCase ,one_hot=_UpperCamelCase ) __lowerCamelCase = _maybe_download( _UpperCamelCase ,_UpperCamelCase ,source_url + test_images_file ) with gfile.Open(_UpperCamelCase ,'''rb''' ) as f: __lowerCamelCase = _extract_images(_UpperCamelCase ) __lowerCamelCase = _maybe_download( _UpperCamelCase ,_UpperCamelCase ,source_url + test_labels_file ) with gfile.Open(_UpperCamelCase ,'''rb''' ) as f: __lowerCamelCase = _extract_labels(_UpperCamelCase ,one_hot=_UpperCamelCase ) if not 0 <= validation_size <= len(_UpperCamelCase ): __lowerCamelCase = ( '''Validation size should be between 0 and ''' F"""{len(_UpperCamelCase )}. Received: {validation_size}.""" ) raise ValueError(_UpperCamelCase ) __lowerCamelCase = train_images[:validation_size] __lowerCamelCase = train_labels[:validation_size] __lowerCamelCase = train_images[validation_size:] __lowerCamelCase = train_labels[validation_size:] __lowerCamelCase = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed} __lowerCamelCase = _DataSet(_UpperCamelCase ,_UpperCamelCase ,**_UpperCamelCase ) __lowerCamelCase = _DataSet(_UpperCamelCase ,_UpperCamelCase ,**_UpperCamelCase ) __lowerCamelCase = _DataSet(_UpperCamelCase ,_UpperCamelCase ,**_UpperCamelCase ) return _Datasets(train=_UpperCamelCase ,validation=_UpperCamelCase ,test=_UpperCamelCase )
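# Sketch of the two IDX-format primitives the MNIST loader above relies on:
# file headers are big-endian unsigned 32-bit integers, and labels become
# one-hot rows via flat-index arithmetic. `read_uint32` and
# `dense_to_one_hot` are illustrative names for this sketch.
import numpy

def read_uint32(bytestream) -> int:
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")  # big-endian, per the IDX spec
    return int(numpy.frombuffer(bytestream.read(4), dtype=dt)[0])

def dense_to_one_hot(labels_dense: numpy.ndarray, num_classes: int) -> numpy.ndarray:
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    # Flat-index trick: row i gets a 1 at column labels_dense[i].
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot

# dense_to_one_hot(numpy.array([2, 0]), 3) -> [[0., 0., 1.], [1., 0., 0.]]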
714
from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { """unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""", } class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = """lxmert""" lowerCAmelCase__ = {} def __init__( self , __UpperCAmelCase=30522 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=9500 , __UpperCAmelCase=1600 , __UpperCAmelCase=400 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-1_2 , __UpperCAmelCase=9 , __UpperCAmelCase=5 , __UpperCAmelCase=5 , __UpperCAmelCase=2048 , __UpperCAmelCase=4 , __UpperCAmelCase=6.67 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_act __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = initializer_range __lowerCamelCase = layer_norm_eps __lowerCamelCase = num_qa_labels __lowerCamelCase = num_object_labels __lowerCamelCase = num_attr_labels __lowerCamelCase = l_layers __lowerCamelCase = x_layers __lowerCamelCase = r_layers __lowerCamelCase = visual_feat_dim __lowerCamelCase = visual_pos_dim __lowerCamelCase = visual_loss_normalizer __lowerCamelCase = task_matched __lowerCamelCase = task_mask_lm __lowerCamelCase = task_obj_predict __lowerCamelCase = task_qa __lowerCamelCase = visual_obj_loss __lowerCamelCase = visual_attr_loss __lowerCamelCase = visual_feat_loss __lowerCamelCase = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers} super().__init__(**__UpperCAmelCase )
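# Quick usage sketch for the config above: LXMERT keeps separate depths for
# its language, cross-modal and vision towers, so "number of hidden layers"
# is the per-tower mapping built in __init__ rather than a single integer.
# The attribute name shown is an assumption based on that dict; values are
# illustrative.
from transformers import LxmertConfig

config = LxmertConfig(l_layers=9, x_layers=5, r_layers=5)
# Expected: {'vision': 5, 'cross_encoder': 5, 'language': 9}
print(config.num_hidden_layers)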
622
0
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.activations import gelu_new, gelu_python, get_activation @require_torch class A_ ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self: Optional[int] ): __lowerCamelCase : Dict = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] ) __lowerCamelCase : Any = get_activation('gelu' ) self.assertTrue(torch.allclose(gelu_python(a ) , torch_builtin(a ) ) ) self.assertFalse(torch.allclose(gelu_python(a ) , gelu_new(a ) ) ) def _snake_case ( self: Optional[int] ): __lowerCamelCase : List[str] = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] ) __lowerCamelCase : Tuple = get_activation('gelu' ) __lowerCamelCase : Any = get_activation('gelu_10' ) __lowerCamelCase : Tuple = torch_builtin(a ) __lowerCamelCase : Optional[Any] = geluaa(a ) __lowerCamelCase : Optional[Any] = torch.where(y_gelu_aa < 1_0.0 , 1 , 0 ) self.assertTrue(torch.max(a ).item() == 1_0.0 ) self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) ) def _snake_case ( self: Any ): get_activation('gelu' ) get_activation('gelu_10' ) get_activation('gelu_fast' ) get_activation('gelu_new' ) get_activation('gelu_python' ) get_activation('gelu_pytorch_tanh' ) get_activation('linear' ) get_activation('mish' ) get_activation('quick_gelu' ) get_activation('relu' ) get_activation('sigmoid' ) get_activation('silu' ) get_activation('swish' ) get_activation('tanh' ) with self.assertRaises(a ): get_activation('bogus' ) with self.assertRaises(a ): get_activation(a ) def _snake_case ( self: str ): __lowerCamelCase : Union[str, Any] = get_activation('gelu' ) __lowerCamelCase : List[Any] = 1 __lowerCamelCase : List[Any] = get_activation('gelu' ) self.assertEqual(acta.a , 1 ) with self.assertRaises(a ): __lowerCamelCase : Any = acta.a
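# The test above distinguishes the exact (erf-based) GELU from its tanh
# approximation; a minimal reference sketch of both formulas. Function names
# are illustrative.
import math
import torch

def gelu_exact(x: torch.Tensor) -> torch.Tensor:
    # GELU(x) = x * Phi(x), with Phi the standard normal CDF.
    return 0.5 * x * (1.0 + torch.erf(x / math.sqrt(2.0)))

def gelu_tanh(x: torch.Tensor) -> torch.Tensor:
    # Tanh approximation from Hendrycks & Gimpel (2016).
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x**3)))

x = torch.tensor([-1.0, 0.0, 1.0])
print(gelu_exact(x))  # matches torch.nn.functional.gelu(x) to float precision
print(gelu_tanh(x))   # close to, but not identical with, the exact form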
669
import unittest from knapsack import greedy_knapsack as kp class A_ ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self: List[Any] ): __lowerCamelCase : str = [10, 20, 30, 40, 50, 60] __lowerCamelCase : List[str] = [2, 4, 6, 8, 10, 12] __lowerCamelCase : Tuple = 100 self.assertEqual(kp.calc_profit(a , a , a ) , 210 ) def _snake_case ( self: str ): self.assertRaisesRegex(a , 'max_weight must greater than zero.' ) def _snake_case ( self: List[str] ): self.assertRaisesRegex(a , 'Weight can not be negative.' ) def _snake_case ( self: Dict ): self.assertRaisesRegex(a , 'Profit can not be negative.' ) def _snake_case ( self: List[str] ): self.assertRaisesRegex(a , 'max_weight must greater than zero.' ) def _snake_case ( self: Any ): self.assertRaisesRegex( a , 'The length of profit and weight must be same.' ) if __name__ == "__main__": unittest.main()
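# A standalone greedy-knapsack sketch consistent with the expected value in
# the first test above: items are taken in decreasing profit/weight order
# while they fit. This is the 0/1 greedy variant; the module under test may
# also take a fraction of the last item, but both agree on this input, since
# with profits 10..60, weights 2..12 and capacity 100 every item fits
# (total weight 42) and the answer is the full profit sum, 210.
def greedy_profit(profit, weight, max_weight):
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    total, remaining = 0, max_weight
    for p, w in items:
        if w <= remaining:
            total += p
            remaining -= w
    return total

assert greedy_profit([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100) == 210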
669
1
'''simple docstring''' import math from typing import Optional import numpy as np from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase__ : List[str] =logging.get_logger(__name__) UpperCAmelCase__ : str ={ '''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''', '''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''', } class __A ( a ): __A = """encodec""" def __init__( self , UpperCAmelCase_=[1.5, 3.0, 6.0, 12.0, 24.0] , UpperCAmelCase_=24000 , UpperCAmelCase_=1 , UpperCAmelCase_=False , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=128 , UpperCAmelCase_=32 , UpperCAmelCase_=1 , UpperCAmelCase_=[8, 5, 4, 2] , UpperCAmelCase_="weight_norm" , UpperCAmelCase_=7 , UpperCAmelCase_=7 , UpperCAmelCase_=3 , UpperCAmelCase_=2 , UpperCAmelCase_=True , UpperCAmelCase_="reflect" , UpperCAmelCase_=2 , UpperCAmelCase_=2 , UpperCAmelCase_=1.0 , UpperCAmelCase_=1024 , UpperCAmelCase_=None , UpperCAmelCase_=True , **UpperCAmelCase_ , ): lowerCamelCase =target_bandwidths lowerCamelCase =sampling_rate lowerCamelCase =audio_channels lowerCamelCase =normalize lowerCamelCase =chunk_length_s lowerCamelCase =overlap lowerCamelCase =hidden_size lowerCamelCase =num_filters lowerCamelCase =num_residual_layers lowerCamelCase =upsampling_ratios lowerCamelCase =norm_type lowerCamelCase =kernel_size lowerCamelCase =last_kernel_size lowerCamelCase =residual_kernel_size lowerCamelCase =dilation_growth_rate lowerCamelCase =use_causal_conv lowerCamelCase =pad_mode lowerCamelCase =compress lowerCamelCase =num_lstm_layers lowerCamelCase =trim_right_ratio lowerCamelCase =codebook_size lowerCamelCase =codebook_dim if codebook_dim is not None else hidden_size lowerCamelCase =use_conv_shortcut if self.norm_type not in ["weight_norm", "time_group_norm"]: raise ValueError( f"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" ) super().__init__(**UpperCAmelCase_ ) @property def _snake_case ( self ): if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def _snake_case ( self ): if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length ) ) @property def _snake_case ( self ): lowerCamelCase =np.prod(self.upsampling_ratios ) return math.ceil(self.sampling_rate / hop_length ) @property def _snake_case ( self ): return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
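# Worked numbers for the derived properties above, using the 24 kHz defaults
# (sampling_rate=24000, upsampling_ratios=[8, 5, 4, 2], codebook_size=1024,
# top target bandwidth 24.0 kbps).
import math

sampling_rate = 24_000
upsampling_ratios = [8, 5, 4, 2]
hop_length = math.prod(upsampling_ratios)           # 320 samples per frame
frame_rate = math.ceil(sampling_rate / hop_length)  # 75 frames per second
# Each quantizer contributes 10 bits per frame (log2 of the 1024-entry
# codebook), so the quantizer count needed for 24 kbps is:
num_quantizers = int(1000 * 24.0 // (frame_rate * 10))  # 32
print(hop_length, frame_rate, num_quantizers)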
719
import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __A : def __init__( self , UpperCAmelCase_ , UpperCAmelCase_=13 , UpperCAmelCase_=32 , UpperCAmelCase_=2 , UpperCAmelCase_=3 , UpperCAmelCase_=16 , UpperCAmelCase_=[32, 64, 128] , UpperCAmelCase_=[1, 2, 1] , UpperCAmelCase_=[2, 2, 4] , UpperCAmelCase_=2 , UpperCAmelCase_=2.0 , UpperCAmelCase_=True , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.1 , UpperCAmelCase_="gelu" , UpperCAmelCase_=False , UpperCAmelCase_=True , UpperCAmelCase_=0.0_2 , UpperCAmelCase_=1E-5 , UpperCAmelCase_=True , UpperCAmelCase_=None , UpperCAmelCase_=True , UpperCAmelCase_=10 , UpperCAmelCase_=8 , UpperCAmelCase_=["stage1", "stage2"] , UpperCAmelCase_=[1, 2] , ): lowerCamelCase =parent lowerCamelCase =batch_size lowerCamelCase =image_size lowerCamelCase =patch_size lowerCamelCase =num_channels lowerCamelCase =embed_dim lowerCamelCase =hidden_sizes lowerCamelCase =depths lowerCamelCase =num_heads lowerCamelCase =window_size lowerCamelCase =mlp_ratio lowerCamelCase =qkv_bias lowerCamelCase =hidden_dropout_prob lowerCamelCase =attention_probs_dropout_prob lowerCamelCase =drop_path_rate lowerCamelCase =hidden_act lowerCamelCase =use_absolute_embeddings lowerCamelCase =patch_norm lowerCamelCase =layer_norm_eps lowerCamelCase =initializer_range lowerCamelCase =is_training lowerCamelCase =scope lowerCamelCase =use_labels lowerCamelCase =type_sequence_label_size lowerCamelCase =encoder_stride lowerCamelCase =out_features lowerCamelCase =out_indices def _snake_case ( self ): lowerCamelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase =None if self.use_labels: lowerCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase =self.get_config() return config, pixel_values, labels def _snake_case ( self ): return FocalNetConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): 
lowerCamelCase =FocalNetModel(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() lowerCamelCase =model(UpperCAmelCase_ ) lowerCamelCase =((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowerCamelCase =int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): lowerCamelCase =FocalNetBackbone(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() lowerCamelCase =model(UpperCAmelCase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] ) # verify backbone works with out_features=None lowerCamelCase =None lowerCamelCase =FocalNetBackbone(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() lowerCamelCase =model(UpperCAmelCase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): lowerCamelCase =FocalNetForMaskedImageModeling(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() lowerCamelCase =model(UpperCAmelCase_ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCamelCase =1 lowerCamelCase =FocalNetForMaskedImageModeling(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() lowerCamelCase =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase =model(UpperCAmelCase_ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): lowerCamelCase =self.type_sequence_label_size lowerCamelCase =FocalNetForImageClassification(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() lowerCamelCase =model(UpperCAmelCase_ , labels=UpperCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCamelCase =1 lowerCamelCase =FocalNetForImageClassification(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() lowerCamelCase =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase =model(UpperCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _snake_case ( self ): lowerCamelCase =self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase =config_and_inputs lowerCamelCase ={"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class __A ( a , a , unittest.TestCase ): __A = ( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) __A = ( {"""feature-extraction""": FocalNetModel, 
"""image-classification""": FocalNetForImageClassification} if is_torch_available() else {} ) __A = False __A = False __A = False __A = False __A = False def _snake_case ( self ): lowerCamelCase =FocalNetModelTester(self ) lowerCamelCase =ConfigTester(self , config_class=UpperCAmelCase_ , embed_dim=37 , has_text_modality=UpperCAmelCase_ ) def _snake_case ( self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _snake_case ( self ): return def _snake_case ( self ): lowerCamelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase_ ) def _snake_case ( self ): lowerCamelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*UpperCAmelCase_ ) def _snake_case ( self ): lowerCamelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase_ ) def _snake_case ( self ): lowerCamelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ ) @unittest.skip(reason="""FocalNet does not use inputs_embeds""" ) def _snake_case ( self ): pass @unittest.skip(reason="""FocalNet does not use feedforward chunking""" ) def _snake_case ( self ): pass def _snake_case ( self ): lowerCamelCase , lowerCamelCase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: lowerCamelCase =model_class(UpperCAmelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCamelCase =model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear ) ) def _snake_case ( self ): lowerCamelCase , lowerCamelCase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: lowerCamelCase =model_class(UpperCAmelCase_ ) lowerCamelCase =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase =[*signature.parameters.keys()] lowerCamelCase =["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCAmelCase_ ) def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): lowerCamelCase =model_class(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() with torch.no_grad(): lowerCamelCase =model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) ) lowerCamelCase =outputs.hidden_states lowerCamelCase =getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ ) # FocalNet has a different seq_length lowerCamelCase =( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCamelCase =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) lowerCamelCase =outputs.reshaped_hidden_states self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ ) lowerCamelCase , lowerCamelCase , lowerCamelCase , 
lowerCamelCase =reshaped_hidden_states[0].shape lowerCamelCase =( reshaped_hidden_states[0].view(UpperCAmelCase_ , UpperCAmelCase_ , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def _snake_case ( self ): lowerCamelCase , lowerCamelCase =self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase =( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: lowerCamelCase =True self.check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase =True self.check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) def _snake_case ( self ): lowerCamelCase , lowerCamelCase =self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase =3 lowerCamelCase =( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowerCamelCase =( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCamelCase =image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowerCamelCase =image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: lowerCamelCase =True self.check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase =True self.check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , (padded_height, padded_width) ) @slow def _snake_case ( self ): for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase =FocalNetModel.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) def _snake_case ( self ): lowerCamelCase , lowerCamelCase =self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase =_config_zero_init(UpperCAmelCase_ ) for model_class in self.all_model_classes: lowerCamelCase =model_class(config=UpperCAmelCase_ ) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @require_vision @require_torch class __A ( unittest.TestCase ): @cached_property def _snake_case ( self ): # TODO update organization return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None @slow def _snake_case ( self ): lowerCamelCase =FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(UpperCAmelCase_ ) lowerCamelCase =self.default_image_processor lowerCamelCase =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) lowerCamelCase =image_processor(images=UpperCAmelCase_ , return_tensors="""pt""" ).to(UpperCAmelCase_ ) # forward pass with torch.no_grad(): lowerCamelCase =model(**UpperCAmelCase_ ) # verify the logits lowerCamelCase =torch.Size((1, 1000) ) 
self.assertEqual(outputs.logits.shape , UpperCAmelCase_ ) lowerCamelCase =torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(UpperCAmelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1E-4 ) ) self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 ) @require_torch class __A ( a , unittest.TestCase ): __A = (FocalNetBackbone,) if is_torch_available() else () __A = FocalNetConfig __A = False def _snake_case ( self ): lowerCamelCase =FocalNetModelTester(self )
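# Worked shape arithmetic behind `create_and_check_model` in the tester
# above, using its defaults (image_size=32, patch_size=2, embed_dim=16,
# depths=[1, 2, 1]): each stage halves the spatial grid, so the patch count
# shrinks by 4x per stage while the channel width doubles.
image_size, patch_size, embed_dim, depths = 32, 2, 16, [1, 2, 1]
num_patches = (image_size // patch_size) ** 2               # 16 * 16 = 256
expected_seq_len = num_patches // (4 ** (len(depths) - 1))  # 256 // 16 = 16
expected_dim = embed_dim * 2 ** (len(depths) - 1)           # 16 * 4 = 64
print(expected_seq_len, expected_dim)  # final hidden state: (batch, 16, 64)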
269
0
lowercase__ : Union[str, Any] = '''0.21.0''' from .accelerator import Accelerator from .big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from .data_loader import skip_first_batches from .launchers import debug_launcher, notebook_launcher from .state import PartialState from .utils import ( DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states, ) if is_rich_available(): from .utils import rich
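# Usage sketch for one of the utilities re-exported above:
# `find_executable_batch_size` retries the wrapped function with a halved
# batch size whenever it hits a CUDA out-of-memory error. The training body
# here is a placeholder.
from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    print(f"trying batch_size={batch_size}")
    # ... build dataloaders and step the model with `batch_size` here ...

train()  # called with no arguments; the decorator supplies batch_size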
312
import os import sys import tempfile import torch from .state import AcceleratorState from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__=() , snake_case__=None , snake_case__="no" , snake_case__="29500" ) -> Dict: lowerCAmelCase = False lowerCAmelCase = False if any(key.startswith('''KAGGLE''' ) for key in os.environ.keys() ): lowerCAmelCase = True elif "IPython" in sys.modules: lowerCAmelCase = '''google.colab''' in str(sys.modules['''IPython'''].get_ipython() ) try: lowerCAmelCase = PrecisionType(mixed_precision.lower() ) except ValueError: raise ValueError( f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." ) if (in_colab or in_kaggle) and (os.environ.get('''TPU_NAME''' , snake_case__ ) is not None): # TPU launch import torch_xla.distributed.xla_multiprocessing as xmp if len(AcceleratorState._shared_state ) > 0: raise ValueError( '''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside ''' '''your training function. Restart your notebook and make sure no cells initializes an ''' '''`Accelerator`.''' ) if num_processes is None: lowerCAmelCase = 8 lowerCAmelCase = PrepareForLaunch(snake_case__ , distributed_type='''TPU''' ) print(f"Launching a training on {num_processes} TPU cores." ) xmp.spawn(snake_case__ , args=snake_case__ , nprocs=snake_case__ , start_method='''fork''' ) elif in_colab: # No need for a distributed launch otherwise as it's either CPU or one GPU. if torch.cuda.is_available(): print('''Launching training on one GPU.''' ) else: print('''Launching training on one CPU.''' ) function(*snake_case__ ) else: if num_processes is None: raise ValueError( '''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' ) if num_processes > 1: # Multi-GPU launch from torch.multiprocessing import start_processes from torch.multiprocessing.spawn import ProcessRaisedException if len(AcceleratorState._shared_state ) > 0: raise ValueError( '''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized ''' '''inside your training function. Restart your notebook and make sure no cells initializes an ''' '''`Accelerator`.''' ) if torch.cuda.is_initialized(): raise ValueError( '''To launch a multi-GPU training from your notebook, you need to avoid running any instruction ''' '''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA ''' '''function.''' ) # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=snake_case__ , master_addr='''127.0.01''' , master_port=snake_case__ , mixed_precision=snake_case__ ): lowerCAmelCase = PrepareForLaunch(snake_case__ , distributed_type='''MULTI_GPU''' ) print(f"Launching training on {num_processes} GPUs." ) try: start_processes(snake_case__ , args=snake_case__ , nprocs=snake_case__ , start_method='''fork''' ) except ProcessRaisedException as e: if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]: raise RuntimeError( '''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. ''' '''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. 
''' '''Please review your imports and test them when running the `notebook_launcher()` to identify ''' '''which one is problematic.''' ) from e else: # No need for a distributed launch otherwise as it's either CPU, GPU or MPS. if is_mps_available(): lowerCAmelCase = '''1''' print('''Launching training on MPS.''' ) elif torch.cuda.is_available(): print('''Launching training on one GPU.''' ) else: print('''Launching training on CPU.''' ) function(*snake_case__ ) def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__=() , snake_case__=2 ) -> Dict: from torch.multiprocessing import start_processes with tempfile.NamedTemporaryFile() as tmp_file: # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=snake_case__ , master_addr='''127.0.01''' , master_port='''29500''' , accelerate_mixed_precision='''no''' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='''yes''' , ): lowerCAmelCase = PrepareForLaunch(snake_case__ , debug=snake_case__ ) start_processes(snake_case__ , args=snake_case__ , nprocs=snake_case__ , start_method='''fork''' )
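# Minimal usage sketch for `notebook_launcher` above: define the training
# function first, then hand it to the launcher, which picks the TPU,
# multi-GPU or single-device path as probed above. Assumes a machine with at
# least 2 GPUs; the function body is a placeholder.
from accelerate import Accelerator, notebook_launcher

def training_function():
    accelerator = Accelerator()
    accelerator.print(f"process {accelerator.process_index} of {accelerator.num_processes}")

# Spawns 2 forked workers; the Accelerator must be created *inside*
# `training_function`, as the checks above enforce.
notebook_launcher(training_function, args=(), num_processes=2)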
312
1
import math def _UpperCamelCase (a__ :int ): """simple docstring""" UpperCamelCase__ = [True] * n UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = True for i in range(3 , int(n**0.5 + 1 ) , 2 ): UpperCamelCase__ = i * 2 while index < n: UpperCamelCase__ = False UpperCamelCase__ = index + i UpperCamelCase__ = [2] for i in range(3 , a__ , 2 ): if is_prime[i]: primes.append(a__ ) return primes def _UpperCamelCase (a__ :int = 9999_6666_3333 ): """simple docstring""" UpperCamelCase__ = math.floor(math.sqrt(a__ ) ) + 100 UpperCamelCase__ = prime_sieve(a__ ) UpperCamelCase__ = 0 UpperCamelCase__ = 0 UpperCamelCase__ = primes[prime_index] while (last_prime**2) <= limit: UpperCamelCase__ = primes[prime_index + 1] UpperCamelCase__ = last_prime**2 UpperCamelCase__ = next_prime**2 # Get numbers divisible by lps(current) UpperCamelCase__ = lower_bound + last_prime while upper_bound > current <= limit: matches_sum += current current += last_prime # Reset the upper_bound while (upper_bound - next_prime) > limit: upper_bound -= next_prime # Add the numbers divisible by ups(current) UpperCamelCase__ = upper_bound - next_prime while current > lower_bound: matches_sum += current current -= next_prime # Remove the numbers divisible by both ups and lps UpperCamelCase__ = 0 while upper_bound > current <= limit: if current <= lower_bound: # Increment the current number current += last_prime * next_prime continue if current > limit: break # Remove twice since it was added by both ups and lps matches_sum -= current * 2 # Increment the current number current += last_prime * next_prime # Setup for next pair UpperCamelCase__ = next_prime prime_index += 1 return matches_sum if __name__ == "__main__": print(solution())
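# Brute-force cross-check of the sieve-based solution above for small limits:
# M(p, q, N) is the largest n <= N whose prime factors are exactly {p, q},
# and the answer sums the distinct M values. Project Euler 347 gives
# S(100) = 2262.
def brute_force(limit: int) -> int:
    best = {}
    for n in range(2, limit + 1):
        m, factors = n, set()
        d = 2
        while d * d <= m:  # trial division to collect distinct prime factors
            while m % d == 0:
                factors.add(d)
                m //= d
            d += 1
        if m > 1:
            factors.add(m)
        if len(factors) == 2:
            pair = tuple(sorted(factors))
            best[pair] = max(best.get(pair, 0), n)
    return sum(best.values())

assert brute_force(100) == 2262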
548
import math from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = { "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json", # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio } class __SCREAMING_SNAKE_CASE ( _a ): snake_case : Optional[Any] = """data2vec-audio""" def __init__( self , __lowerCAmelCase=32 , __lowerCAmelCase=768 , __lowerCAmelCase=12 , __lowerCAmelCase=12 , __lowerCAmelCase=3072 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.02 , __lowerCAmelCase=1E-5 , __lowerCAmelCase="gelu" , __lowerCAmelCase=(512, 512, 512, 512, 512, 512, 512) , __lowerCAmelCase=(5, 2, 2, 2, 2, 2, 2) , __lowerCAmelCase=(10, 3, 3, 3, 3, 2, 2) , __lowerCAmelCase=False , __lowerCAmelCase=16 , __lowerCAmelCase=19 , __lowerCAmelCase=5 , __lowerCAmelCase=0.05 , __lowerCAmelCase=10 , __lowerCAmelCase=2 , __lowerCAmelCase=0.0 , __lowerCAmelCase=10 , __lowerCAmelCase=0 , __lowerCAmelCase="sum" , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=256 , __lowerCAmelCase=(512, 512, 512, 512, 1500) , __lowerCAmelCase=(5, 3, 3, 1, 1) , __lowerCAmelCase=(1, 2, 3, 1, 1) , __lowerCAmelCase=512 , __lowerCAmelCase=0 , __lowerCAmelCase=1 , __lowerCAmelCase=2 , __lowerCAmelCase=False , __lowerCAmelCase=3 , __lowerCAmelCase=2 , __lowerCAmelCase=3 , __lowerCAmelCase=None , **__lowerCAmelCase , ): super().__init__(**__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase ) UpperCamelCase__ = hidden_size UpperCamelCase__ = feat_extract_activation UpperCamelCase__ = list(__lowerCAmelCase ) UpperCamelCase__ = list(__lowerCAmelCase ) UpperCamelCase__ = list(__lowerCAmelCase ) UpperCamelCase__ = conv_bias UpperCamelCase__ = num_conv_pos_embeddings UpperCamelCase__ = num_conv_pos_embedding_groups UpperCamelCase__ = conv_pos_kernel_size UpperCamelCase__ = len(self.conv_dim ) UpperCamelCase__ = num_hidden_layers UpperCamelCase__ = intermediate_size UpperCamelCase__ = hidden_act UpperCamelCase__ = num_attention_heads UpperCamelCase__ = hidden_dropout UpperCamelCase__ = attention_dropout UpperCamelCase__ = activation_dropout UpperCamelCase__ = feat_proj_dropout UpperCamelCase__ = final_dropout UpperCamelCase__ = layerdrop UpperCamelCase__ = layer_norm_eps UpperCamelCase__ = initializer_range UpperCamelCase__ = vocab_size UpperCamelCase__ = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==""" """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =""" f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,""" f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 UpperCamelCase__ = mask_time_prob UpperCamelCase__ = mask_time_length UpperCamelCase__ = mask_time_min_masks UpperCamelCase__ = mask_feature_prob UpperCamelCase__ = mask_feature_length UpperCamelCase__ = mask_feature_min_masks # ctc loss UpperCamelCase__ = ctc_loss_reduction UpperCamelCase__ = ctc_zero_infinity # adapter UpperCamelCase__ = add_adapter UpperCamelCase__ = adapter_kernel_size UpperCamelCase__ = adapter_stride UpperCamelCase__ = num_adapter_layers UpperCamelCase__ = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. UpperCamelCase__ = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. UpperCamelCase__ = list(__lowerCAmelCase ) UpperCamelCase__ = list(__lowerCAmelCase ) UpperCamelCase__ = list(__lowerCAmelCase ) UpperCamelCase__ = xvector_output_dim @property def _lowerCamelCase ( self ): return math.prod(self.conv_stride )
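# The conv feature extractor above downsamples raw audio by the product of
# its strides, which is exactly what the `inputs_to_logits_ratio`-style
# property at the end computes. A worked check with the default strides
# (5, 2, 2, 2, 2, 2, 2); the 16 kHz rate is the sampling rate the 960h
# models operate on.
import math

conv_stride = (5, 2, 2, 2, 2, 2, 2)
downsample = math.prod(conv_stride)  # 320 input samples per output frame
sampling_rate = 16_000
print(downsample, sampling_rate / downsample)  # 320 -> 50 frames per second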
548
1
'''simple docstring''' import argparse import os from pathlib import Path import fairseq import torch from packaging import version from torch import nn from transformers import ( BartConfig, BartForConditionalGeneration, BartForSequenceClassification, BartModel, BartTokenizer, ) from transformers.utils import logging _UpperCamelCase : str = ['bart.large', 'bart.large.mnli', 'bart.large.cnn', 'bart_xsum/model.pt'] _UpperCamelCase : str = {'bart.large': BartModel, 'bart.large.mnli': BartForSequenceClassification} if version.parse(fairseq.__version__) < version.parse('0.9.0'): raise Exception('requires fairseq >= 0.9.0') logging.set_verbosity_info() _UpperCamelCase : List[str] = logging.get_logger(__name__) _UpperCamelCase : str = ' Hello world! cécé herlolip' _UpperCamelCase : Union[str, Any] = [ ('model.classification_heads.mnli.dense.weight', 'classification_head.dense.weight'), ('model.classification_heads.mnli.dense.bias', 'classification_head.dense.bias'), ('model.classification_heads.mnli.out_proj.weight', 'classification_head.out_proj.weight'), ('model.classification_heads.mnli.out_proj.bias', 'classification_head.out_proj.bias'), ] def __UpperCAmelCase ( A : Tuple ) -> Tuple: UpperCAmelCase_ : List[str] = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''_float_tensor''', ] for k in ignore_keys: state_dict.pop(A , A ) def __UpperCAmelCase ( A : Union[str, Any] , A : str , A : List[Any] ) -> Optional[Any]: UpperCAmelCase_ : str = dct.pop(A ) UpperCAmelCase_ : Dict = val def __UpperCAmelCase ( A : Union[str, Any] ) -> Optional[int]: UpperCAmelCase_ : List[str] = torch.load(A , map_location='''cpu''' ) UpperCAmelCase_ : str = torch.hub.load('''pytorch/fairseq''' , '''bart.large.cnn''' ).eval() hub_interface.model.load_state_dict(sd['''model'''] ) return hub_interface def __UpperCAmelCase ( A : Optional[int] ) -> Optional[int]: UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = emb.weight.shape UpperCAmelCase_ : Optional[Any] = nn.Linear(A , A , bias=A ) UpperCAmelCase_ : Dict = emb.weight.data return lin_layer @torch.no_grad() def __UpperCAmelCase ( A : Optional[Any] , A : Optional[Any] , A : Optional[int]=None ) -> int: if not os.path.exists(A ): UpperCAmelCase_ : int = torch.hub.load('''pytorch/fairseq''' , A ).eval() else: UpperCAmelCase_ : List[str] = load_xsum_checkpoint(A ) bart.model.upgrade_state_dict(bart.model.state_dict() ) if hf_checkpoint_name is None: UpperCAmelCase_ : Dict = checkpoint_path.replace('''.''' , '''-''' ) UpperCAmelCase_ : List[Any] = BartConfig.from_pretrained(A ) UpperCAmelCase_ : Optional[Any] = bart.encode(A ).unsqueeze(0 ) UpperCAmelCase_ : int = BartTokenizer.from_pretrained(A ).encode(A , return_tensors='''pt''' ).unsqueeze(0 ) if not torch.eq(A , A ).all(): raise ValueError( F"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}" ) if checkpoint_path == "bart.large.mnli": UpperCAmelCase_ : int = bart.state_dict() remove_ignore_keys_(A ) UpperCAmelCase_ : Optional[int] = state_dict['''model.decoder.embed_tokens.weight'''] for src, dest in mnli_rename_keys: rename_key(A , A , A ) UpperCAmelCase_ : Union[str, Any] = BartForSequenceClassification(A ).eval() model.load_state_dict(A ) UpperCAmelCase_ : str = bart.predict('''mnli''' , A , return_logits=A ) UpperCAmelCase_ : List[str] = model(A )[0] # logits else: # no classification heads to worry about UpperCAmelCase_ : Union[str, Any] = bart.model.state_dict() remove_ignore_keys_(A ) UpperCAmelCase_ : 
Union[str, Any] = state_dict['''decoder.embed_tokens.weight'''] UpperCAmelCase_ : int = bart.extract_features(A ) if hf_checkpoint_name == "facebook/bart-large": UpperCAmelCase_ : List[Any] = BartModel(A ).eval() model.load_state_dict(A ) UpperCAmelCase_ : int = model(A ).model[0] else: UpperCAmelCase_ : str = BartForConditionalGeneration(A ).eval() # an existing summarization ckpt model.model.load_state_dict(A ) if hasattr(A , '''lm_head''' ): UpperCAmelCase_ : Optional[Any] = make_linear_from_emb(model.model.shared ) UpperCAmelCase_ : int = model.model(A )[0] # Check results if fairseq_output.shape != new_model_outputs.shape: raise ValueError( F"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}" ) if (fairseq_output != new_model_outputs).any().item(): raise ValueError('''Some values in `fairseq_output` are different from `new_model_outputs`''' ) Path(A ).mkdir(exist_ok=A ) model.save_pretrained(A ) if __name__ == "__main__": _UpperCamelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( 'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.' ) parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--hf_config', default=None, type=str, help='Which huggingface architecture to use: bart-large-xsum' ) _UpperCamelCase : Union[str, Any] = parser.parse_args() convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
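# What `make_linear_from_emb` in the conversion script above does, in
# isolation: build a bias-free Linear whose weight shares storage with the
# embedding matrix, so the LM head stays tied to the input embeddings.
# Dimensions here are illustrative.
import torch
from torch import nn

emb = nn.Embedding(10, 4)
vocab_size, emb_size = emb.weight.shape
lm_head = nn.Linear(emb_size, vocab_size, bias=False)
lm_head.weight.data = emb.weight.data  # weight tying: same underlying tensor

x = torch.randn(1, 4)
print(lm_head(x).shape)  # torch.Size([1, 10]) -> logits over the vocabulary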
541
'''simple docstring''' import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset _UpperCamelCase : str = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class snake_case__ ( nn.Module): def __init__( self : Optional[int] , _A : Tuple ) -> List[str]: super().__init__() UpperCAmelCase_ : Tuple = torchvision.models.resnetaaa(pretrained=_A ) UpperCAmelCase_ : Union[str, Any] = list(model.children() )[:-2] UpperCAmelCase_ : Union[str, Any] = nn.Sequential(*_A ) UpperCAmelCase_ : List[Any] = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def A ( self : str , _A : Optional[int] ) -> str: # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048 UpperCAmelCase_ : List[str] = self.pool(self.model(_A ) ) UpperCAmelCase_ : Tuple = torch.flatten(_A , start_dim=2 ) UpperCAmelCase_ : Union[str, Any] = out.transpose(1 , 2 ).contiguous() return out # BxNx2048 class snake_case__ ( UpperCamelCase): def __init__( self : Optional[int] , _A : int , _A : str , _A : int , _A : Dict , _A : int ) -> List[str]: UpperCAmelCase_ : Any = [json.loads(_A ) for l in open(_A )] UpperCAmelCase_ : Tuple = os.path.dirname(_A ) UpperCAmelCase_ : Any = tokenizer UpperCAmelCase_ : Optional[Any] = labels UpperCAmelCase_ : List[str] = len(_A ) UpperCAmelCase_ : int = max_seq_length UpperCAmelCase_ : str = transforms def __len__( self : Tuple ) -> Tuple: return len(self.data ) def __getitem__( self : Union[str, Any] , _A : Tuple ) -> List[Any]: UpperCAmelCase_ : Tuple = torch.LongTensor(self.tokenizer.encode(self.data[index]['''text'''] , add_special_tokens=_A ) ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = sentence[0], sentence[1:-1], sentence[-1] UpperCAmelCase_ : List[str] = sentence[: self.max_seq_length] UpperCAmelCase_ : List[str] = torch.zeros(self.n_classes ) UpperCAmelCase_ : Optional[int] = 1 UpperCAmelCase_ : Tuple = Image.open(os.path.join(self.data_dir , self.data[index]['''img'''] ) ).convert('''RGB''' ) UpperCAmelCase_ : List[Any] = self.transforms(_A ) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def A ( self : Tuple ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = Counter() for row in self.data: label_freqs.update(row['''label'''] ) return label_freqs def __UpperCAmelCase ( A : Tuple ) -> Union[str, Any]: UpperCAmelCase_ : int = [len(row['''sentence'''] ) for row in batch] UpperCAmelCase_ , UpperCAmelCase_ : Dict = len(A ), max(A ) UpperCAmelCase_ : Tuple = torch.zeros(A , A , dtype=torch.long ) UpperCAmelCase_ : Dict = torch.zeros(A , A , dtype=torch.long ) for i_batch, (input_row, length) in enumerate(zip(A , A ) ): UpperCAmelCase_ : int = input_row['''sentence'''] UpperCAmelCase_ : Optional[Any] = 1 UpperCAmelCase_ : Union[str, Any] = torch.stack([row['''image'''] for row in batch] ) UpperCAmelCase_ : Optional[int] = torch.stack([row['''label'''] for row in batch] ) UpperCAmelCase_ : Any = torch.stack([row['''image_start_token'''] for row in batch] ) UpperCAmelCase_ : int = torch.stack([row['''image_end_token'''] for row in batch] ) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def __UpperCAmelCase ( ) -> Union[str, Any]: return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", 
"Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir", ] def __UpperCAmelCase ( ) -> Union[str, Any]: return transforms.Compose( [ transforms.Resize(2_5_6 ), transforms.CenterCrop(2_2_4 ), transforms.ToTensor(), transforms.Normalize( mean=[0.46777044, 0.44531429, 0.40661017] , std=[0.12221994, 0.12145835, 0.14380469] , ), ] )
541
1
'''simple docstring''' import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ASTConfig from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_torchaudio_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ASTForAudioClassification, ASTModel from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_torchaudio_available(): import torchaudio from transformers import ASTFeatureExtractor class _snake_case : def __init__( self ,_snake_case ,_snake_case=13 ,_snake_case=2 ,_snake_case=24 ,_snake_case=16 ,_snake_case=True ,_snake_case=True ,_snake_case=32 ,_snake_case=5 ,_snake_case=4 ,_snake_case=37 ,_snake_case="gelu" ,_snake_case=0.1 ,_snake_case=0.1 ,_snake_case=10 ,_snake_case=0.02 ,_snake_case=None ,_snake_case=2 ,_snake_case=2 ,): UpperCAmelCase_ : List[str] = parent UpperCAmelCase_ : Dict = batch_size UpperCAmelCase_ : Tuple = patch_size UpperCAmelCase_ : int = max_length UpperCAmelCase_ : Tuple = num_mel_bins UpperCAmelCase_ : int = is_training UpperCAmelCase_ : int = use_labels UpperCAmelCase_ : Tuple = hidden_size UpperCAmelCase_ : Union[str, Any] = num_hidden_layers UpperCAmelCase_ : Dict = num_attention_heads UpperCAmelCase_ : Optional[Any] = intermediate_size UpperCAmelCase_ : str = hidden_act UpperCAmelCase_ : str = hidden_dropout_prob UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob UpperCAmelCase_ : Optional[Any] = type_sequence_label_size UpperCAmelCase_ : Dict = initializer_range UpperCAmelCase_ : Tuple = scope UpperCAmelCase_ : Union[str, Any] = frequency_stride UpperCAmelCase_ : Union[str, Any] = time_stride # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) UpperCAmelCase_ : Dict = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1 UpperCAmelCase_ : str = (self.max_length - self.patch_size) // self.time_stride + 1 UpperCAmelCase_ : List[Any] = frequency_out_dimension * time_out_dimension UpperCAmelCase_ : int = num_patches + 2 def UpperCamelCase__ ( self ): UpperCAmelCase_ : Dict = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] ) UpperCAmelCase_ : Optional[Any] = None if self.use_labels: UpperCAmelCase_ : Any = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) UpperCAmelCase_ : Optional[Any] = self.get_config() return config, input_values, labels def UpperCamelCase__ ( self ): return ASTConfig( patch_size=self.patch_size ,max_length=self.max_length ,num_mel_bins=self.num_mel_bins ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_snake_case ,initializer_range=self.initializer_range ,frequency_stride=self.frequency_stride ,time_stride=self.time_stride ,) def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ): UpperCAmelCase_ : List[Any] = ASTModel(config=_snake_case ) model.to(_snake_case ) 
model.eval() UpperCAmelCase_ : Union[str, Any] = model(_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : Any = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Dict = config_and_inputs UpperCAmelCase_ : Optional[int] = {"input_values": input_values} return config, inputs_dict @require_torch class _snake_case (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase): __A : Optional[Any] =( ( ASTModel, ASTForAudioClassification, ) if is_torch_available() else () ) __A : List[Any] =( {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel} if is_torch_available() else {} ) __A : int =False __A : Tuple =False __A : List[str] =False __A : Optional[Any] =False def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ): if pipeline_test_casse_name == "AudioClassificationPipelineTests": return True return False def UpperCamelCase__ ( self ): UpperCAmelCase_ : List[str] = ASTModelTester(self ) UpperCAmelCase_ : str = ConfigTester(self ,config_class=_snake_case ,has_text_modality=_snake_case ,hidden_size=37 ) def UpperCamelCase__ ( self ): self.config_tester.run_common_tests() @unittest.skip(reason="AST does not use inputs_embeds" ) def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Dict = model_class(_snake_case ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) UpperCAmelCase_ : List[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_snake_case ,nn.Linear ) ) def UpperCamelCase__ ( self ): UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : int = model_class(_snake_case ) UpperCAmelCase_ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : Dict = [*signature.parameters.keys()] UpperCAmelCase_ : Optional[int] = ["input_values"] self.assertListEqual(arg_names[:1] ,_snake_case ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) @slow def UpperCamelCase__ ( self ): for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Optional[Any] = ASTModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) def a__ ( ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Any = hf_hub_download( repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" ) UpperCAmelCase_ , UpperCAmelCase_ : List[str] = torchaudio.load(_SCREAMING_SNAKE_CASE ) return audio, sampling_rate @require_torch @require_torchaudio class _snake_case (unittest.TestCase): @cached_property def UpperCamelCase__ ( self ): return ( ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ) if is_torchaudio_available() else None ) @slow def UpperCamelCase__ ( self ): UpperCAmelCase_ : Optional[int] = self.default_feature_extractor UpperCAmelCase_ : Tuple = 
ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ).to(_snake_case ) UpperCAmelCase_ : Any = self.default_feature_extractor UpperCAmelCase_ , UpperCAmelCase_ : Dict = prepare_audio() UpperCAmelCase_ : str = audio.squeeze().numpy() UpperCAmelCase_ : Optional[Any] = feature_extractor(_snake_case ,sampling_rate=_snake_case ,return_tensors="pt" ).to(_snake_case ) # forward pass with torch.no_grad(): UpperCAmelCase_ : int = model(**_snake_case ) # verify the logits UpperCAmelCase_ : int = torch.Size((1, 5_27) ) self.assertEqual(outputs.logits.shape ,_snake_case ) UpperCAmelCase_ : str = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(_snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_snake_case ,atol=1E-4 ) )
323
'''simple docstring''' import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset _lowerCamelCase = """bert-base-cased""" _lowerCamelCase = """google/pegasus-xsum""" _lowerCamelCase = [""" Sam ate lunch today.""", """Sams lunch ingredients."""] _lowerCamelCase = ["""A very interesting story about what I ate for lunch.""", """Avocado, celery, turkey, coffee"""] _lowerCamelCase = """patrickvonplaten/t5-tiny-random""" _lowerCamelCase = """sshleifer/bart-tiny-random""" _lowerCamelCase = """sshleifer/tiny-mbart""" _lowerCamelCase = """sshleifer/tiny-marian-en-de""" def a__ ( _SCREAMING_SNAKE_CASE : Path , _SCREAMING_SNAKE_CASE : list ) -> Dict: """simple docstring""" UpperCAmelCase_ : List[str] = "\n".join(_SCREAMING_SNAKE_CASE ) Path(_SCREAMING_SNAKE_CASE ).open("w" ).writelines(_SCREAMING_SNAKE_CASE ) def a__ ( _SCREAMING_SNAKE_CASE : str ) -> int: """simple docstring""" for split in ["train", "val", "test"]: _dump_articles(os.path.join(_SCREAMING_SNAKE_CASE , F'''{split}.source''' ) , _SCREAMING_SNAKE_CASE ) _dump_articles(os.path.join(_SCREAMING_SNAKE_CASE , F'''{split}.target''' ) , _SCREAMING_SNAKE_CASE ) return tmp_dir class _snake_case (__SCREAMING_SNAKE_CASE): @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] ,) @slow def UpperCamelCase__ ( self ,_snake_case ): UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(_snake_case ) UpperCAmelCase_ : Dict = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) UpperCAmelCase_ : Optional[int] = max(len(tokenizer.encode(_snake_case ) ) for a in ARTICLES ) UpperCAmelCase_ : List[Any] = max(len(tokenizer.encode(_snake_case ) ) for a in SUMMARIES ) UpperCAmelCase_ : Optional[int] = 4 UpperCAmelCase_ : Union[str, Any] = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated UpperCAmelCase_ , UpperCAmelCase_ : int = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error. UpperCAmelCase_ : Tuple = SeqaSeqDataset( _snake_case ,data_dir=_snake_case ,type_path="train" ,max_source_length=_snake_case ,max_target_length=_snake_case ,src_lang=_snake_case ,tgt_lang=_snake_case ,) UpperCAmelCase_ : List[Any] = DataLoader(_snake_case ,batch_size=2 ,collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(_snake_case ,_snake_case ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. 
assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place UpperCAmelCase_ : Union[str, Any] = shift_tokens_right(batch["labels"] ,tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def UpperCamelCase__ ( self ,_snake_case ): UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_snake_case ) UpperCAmelCase_ : List[str] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) UpperCAmelCase_ : Optional[Any] = max(len(tokenizer.encode(_snake_case ) ) for a in ARTICLES ) UpperCAmelCase_ : Optional[Any] = max(len(tokenizer.encode(_snake_case ) ) for a in SUMMARIES ) UpperCAmelCase_ : List[Any] = 4 UpperCAmelCase_ : Dict = LegacySeqaSeqDataset( _snake_case ,data_dir=_snake_case ,type_path="train" ,max_source_length=20 ,max_target_length=_snake_case ,) UpperCAmelCase_ : Optional[int] = DataLoader(_snake_case ,batch_size=2 ,collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def UpperCamelCase__ ( self ): UpperCAmelCase_ : int = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" ) UpperCAmelCase_ : int = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) UpperCAmelCase_ : Optional[int] = tmp_dir.joinpath("train.source" ).open().readlines() UpperCAmelCase_ : Optional[Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(_snake_case ,_snake_case ,1_28 ,_snake_case ) UpperCAmelCase_ : Dict = {x.name for x in tmp_dir.iterdir()} UpperCAmelCase_ : str = {x.name for x in save_dir.iterdir()} UpperCAmelCase_ : List[str] = save_dir.joinpath("train.source" ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(_snake_case ) < len(_snake_case ) assert len(_snake_case ) == 1 assert len(packed_examples[0] ) == sum(len(_snake_case ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE ,reason="This test requires fairseq" ) def UpperCamelCase__ ( self ): if not FAIRSEQ_AVAILABLE: return UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = self._get_dataset(max_len=64 ) UpperCAmelCase_ : int = 64 UpperCAmelCase_ : str = ds.make_dynamic_sampler(_snake_case ,required_batch_size_multiple=_snake_case ) UpperCAmelCase_ : Dict = [len(_snake_case ) for x in batch_sampler] assert len(set(_snake_case ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(_snake_case ) == len(_snake_case ) # no dropped or added examples UpperCAmelCase_ : Any = DataLoader(_snake_case ,batch_sampler=_snake_case ,collate_fn=ds.collate_fn ,num_workers=2 ) UpperCAmelCase_ : List[str] 
= [] UpperCAmelCase_ : Optional[int] = [] for batch in data_loader: UpperCAmelCase_ : Any = batch["input_ids"].shape UpperCAmelCase_ : Optional[Any] = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple UpperCAmelCase_ : int = np.product(batch["input_ids"].shape ) num_src_per_batch.append(_snake_case ) if num_src_tokens > (max_tokens * 1.1): failures.append(_snake_case ) assert num_src_per_batch[0] == max(_snake_case ) if failures: raise AssertionError(f'''too many tokens in {len(_snake_case )} batches''' ) def UpperCamelCase__ ( self ): UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = self._get_dataset(max_len=5_12 ) UpperCAmelCase_ : Optional[int] = 2 UpperCAmelCase_ : Optional[int] = ds.make_sortish_sampler(_snake_case ,shuffle=_snake_case ) UpperCAmelCase_ : Optional[int] = DataLoader(_snake_case ,batch_size=_snake_case ,collate_fn=ds.collate_fn ,num_workers=2 ) UpperCAmelCase_ : Union[str, Any] = DataLoader(_snake_case ,batch_size=_snake_case ,collate_fn=ds.collate_fn ,num_workers=2 ,sampler=_snake_case ) UpperCAmelCase_ : List[str] = tokenizer.pad_token_id def count_pad_tokens(_snake_case ,_snake_case="input_ids" ): return [batch[k].eq(_snake_case ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(_snake_case ,k="labels" ) ) < sum(count_pad_tokens(_snake_case ,k="labels" ) ) assert sum(count_pad_tokens(_snake_case ) ) < sum(count_pad_tokens(_snake_case ) ) assert len(_snake_case ) == len(_snake_case ) def UpperCamelCase__ ( self ,_snake_case=10_00 ,_snake_case=1_28 ): if os.getenv("USE_REAL_DATA" ,_snake_case ): UpperCAmelCase_ : List[Any] = "examples/seq2seq/wmt_en_ro" UpperCAmelCase_ : Dict = max_len * 2 * 64 if not Path(_snake_case ).joinpath("train.len" ).exists(): save_len_file(_snake_case ,_snake_case ) else: UpperCAmelCase_ : Optional[Any] = "examples/seq2seq/test_data/wmt_en_ro" UpperCAmelCase_ : Union[str, Any] = max_len * 4 save_len_file(_snake_case ,_snake_case ) UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained(_snake_case ) UpperCAmelCase_ : Union[str, Any] = SeqaSeqDataset( _snake_case ,data_dir=_snake_case ,type_path="train" ,max_source_length=_snake_case ,max_target_length=_snake_case ,n_obs=_snake_case ,) return ds, max_tokens, tokenizer def UpperCamelCase__ ( self ): UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self._get_dataset() UpperCAmelCase_ : Tuple = set(DistributedSortishSampler(_snake_case ,2_56 ,num_replicas=2 ,rank=0 ,add_extra_examples=_snake_case ) ) UpperCAmelCase_ : Union[str, Any] = set(DistributedSortishSampler(_snake_case ,2_56 ,num_replicas=2 ,rank=1 ,add_extra_examples=_snake_case ) ) assert idsa.intersection(_snake_case ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] ,) def UpperCamelCase__ ( self ,_snake_case ): UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_snake_case ,use_fast=_snake_case ) if tok_name == MBART_TINY: UpperCAmelCase_ : int = SeqaSeqDataset( _snake_case ,data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ,type_path="train" ,max_source_length=4 ,max_target_length=8 ,src_lang="EN" ,tgt_lang="FR" ,) UpperCAmelCase_ : Optional[Any] = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: UpperCAmelCase_ : int = SeqaSeqDataset( _snake_case ,data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ,type_path="train" ,max_source_length=4 ,max_target_length=8 ,) UpperCAmelCase_ : Optional[int] = 
train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(_snake_case ) == 1 if tok_name == BART_TINY else len(_snake_case ) == 0
323
1
import numpy as np
import torch
import torch.nn as nn

from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel

from ...utils import logging


logger = logging.get_logger(__name__)


class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)

        # Two scalar heads on top of the projected CLIP image embedding:
        # one for NSFW detection, one for watermark detection.
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
14
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}


class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
14
1
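A brief usage sketch for the configuration class in the record above (not part of the dataset record; it assumes the class name MobileNetV1Config as restored in the snippet and a transformers install for the base class):

config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
print(config.hidden_act)  # "relu6"
try:
    MobileNetV1Config(depth_multiplier=0.0)  # should fail validation
except ValueError as err:
    print(err)  # depth_multiplier must be greater than zero.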
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    # Frequency of each possible total when rolling `dice_number` dice,
    # each with faces 1..`sides_number`, enumerated by brute force.
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    # Peter rolls nine 4-sided dice, Colin rolls six 6-sided dice;
    # return the probability that Peter's total beats Colin's.
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability


if __name__ == "__main__":
    print(f"{solution() = }")
703
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def data_handling(data) -> tuple:
    # Split the dataset bunch into feature matrix and target vector
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    housing = fetch_california_housing()
    data, target = data_handling(housing)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
195
0
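A quick sanity check for the dice-frequency routine in the record above (a sketch using the restored names; the identity holds because every ordered roll is counted exactly once, so the frequencies sum to sides**dice):

freqs = total_frequency_distribution(sides_number=4, dice_number=9)
assert sum(freqs) == 4**9  # all ordered rolls counted exactly once
assert freqs[9] == 1 and freqs[36] == 1  # all-ones and all-fours rolls are unique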
import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ : Dict =get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class __lowercase (__SCREAMING_SNAKE_CASE , unittest.TestCase ): """simple docstring""" _UpperCAmelCase = XLMRobertaTokenizer _UpperCAmelCase = XLMRobertaTokenizerFast _UpperCAmelCase = True _UpperCAmelCase = True def UpperCamelCase__ ( self ): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE_ : List[Any] = XLMRobertaTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = '<pad>' SCREAMING_SNAKE_CASE_ : List[str] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(lowerCAmelCase__ ) , 1_0_0_2 ) def UpperCamelCase__ ( self ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_2 ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = XLMRobertaTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : int = tokenizer.tokenize('This is a test' ) self.assertListEqual(lowerCAmelCase__ , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , ) SCREAMING_SNAKE_CASE_ : str = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( lowerCAmelCase__ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) self.assertListEqual( lowerCAmelCase__ , [ value + tokenizer.fairseq_offset for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ ) self.assertListEqual( lowerCAmelCase__ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) def UpperCamelCase__ ( self ): """simple docstring""" if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return SCREAMING_SNAKE_CASE_ : str = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): SCREAMING_SNAKE_CASE_ : List[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Tuple = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = tempfile.mkdtemp() SCREAMING_SNAKE_CASE_ : str = tokenizer_r.save_pretrained(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Tuple = tokenizer_p.save_pretrained(lowerCAmelCase__ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) SCREAMING_SNAKE_CASE_ : List[Any] = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f ) self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ ) # Checks everything loads correctly in the same way SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer_r.from_pretrained(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer_p.from_pretrained(lowerCAmelCase__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(lowerCAmelCase__ ) # Save tokenizer rust, legacy_format=True SCREAMING_SNAKE_CASE_ : int = tempfile.mkdtemp() SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer_p.save_pretrained(lowerCAmelCase__ ) # Checks it save with the same files self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ ) # Checks everything loads correctly in the same way SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer_r.from_pretrained(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : str = tokenizer_p.from_pretrained(lowerCAmelCase__ ) # Check special tokens are set accordingly on Rust and Python for key 
in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) shutil.rmtree(lowerCAmelCase__ ) # Save tokenizer rust, legacy_format=False SCREAMING_SNAKE_CASE_ : Any = tempfile.mkdtemp() SCREAMING_SNAKE_CASE_ : Tuple = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : int = tokenizer_p.save_pretrained(lowerCAmelCase__ ) # Checks it saved the tokenizer.json file self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer_r.from_pretrained(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Tuple = tokenizer_p.from_pretrained(lowerCAmelCase__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) shutil.rmtree(lowerCAmelCase__ ) @cached_property def UpperCamelCase__ ( self ): """simple docstring""" return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base' ) def UpperCamelCase__ ( self ): """simple docstring""" with tempfile.NamedTemporaryFile() as f: shutil.copyfile(lowerCAmelCase__ , f.name ) SCREAMING_SNAKE_CASE_ : List[str] = XLMRobertaTokenizer(f.name , keep_accents=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Dict = pickle.dumps(lowerCAmelCase__ ) pickle.loads(lowerCAmelCase__ ) def UpperCamelCase__ ( self ): """simple docstring""" if not self.test_rust_tokenizer: return SCREAMING_SNAKE_CASE_ : List[Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : List[Any] = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE_ : Any = 'I was born in 92000, and this is falsé.' SCREAMING_SNAKE_CASE_ : List[str] = tokenizer.tokenize(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Optional[Any] = rust_tokenizer.tokenize(lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : List[Any] = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Tuple = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.encode(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Dict = rust_tokenizer.encode(lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) @slow def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = 'Hello World!' SCREAMING_SNAKE_CASE_ : Optional[Any] = [0, 3_5_3_7_8, 6_6_6_1, 3_8, 2] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(lowerCAmelCase__ , self.big_tokenizer.encode(lowerCAmelCase__ ) ) @slow def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will' ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth' ) SCREAMING_SNAKE_CASE_ : Optional[Any] = [ 0, 3_2_9_3, 8_3, 1_0, 4_5_5_2, 4_9_8_9, 7_9_8_6, 6_7_8, 1_0, 5_9_1_5, 1_1_1, 1_7_9_4_5_9, 1_2_4_8_5_0, 4, 6_0_4_4, 2_3_7, 1_2, 6, 5, 6, 4, 6_7_8_0, 7_0_5, 1_5, 1_3_8_8, 4_4, 3_7_8, 1_0_1_1_4, 7_1_1, 1_5_2, 2_0, 6, 5, 2_2_3_7_6, 6_4_2, 1_2_2_1, 1_5_1_9_0, 3_4_1_5_3, 4_5_0, 5_6_0_8, 9_5_9, 1_1_1_9, 5_7_7_0_2, 1_3_6, 1_8_6, 4_7, 1_0_9_8, 2_9_3_6_7, 4_7, # 4426, # What fairseq tokenizes from "<unk>": "_<" # 3678, # What fairseq tokenizes from "<unk>": "unk" # 2740, # What fairseq tokenizes from "<unk>": ">" 3, # What we tokenize from "<unk>": "<unk>" 6, # Residue from the tokenization: an extra sentencepiece underline 4, 6_0_4_4, 2_3_7, 6_2_8_4, 5_0_9_0_1, 5_2_8, 3_1, 9_0, 3_4, 9_2_7, 2, ] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(lowerCAmelCase__ , self.big_tokenizer.encode(lowerCAmelCase__ ) ) @slow def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = {'input_ids': [[0, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [0, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase__ , model_name='xlm-roberta-base' , revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3' , )
101
import unittest

from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow


if is_flax_available():
    import jax.numpy as jnp

    from transformers import FlaxXLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])

        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )

        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
660
0
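An illustrative round trip for the tokenizer behaviour exercised in the record above (hedged sketch: it assumes the same sentencepiece fixture the test's setUp loads, and that converting in-vocab ids back yields the original tokens):

vocab_path = get_tests_dir("fixtures/test_sentencepiece.model")
tok = XLMRobertaTokenizer(vocab_path, keep_accents=True)
ids = tok.convert_tokens_to_ids(tok.tokenize("This is a test"))
assert tok.convert_ids_to_tokens(ids) == ["▁This", "▁is", "▁a", "▁t", "est"]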
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
390
def count_divisors(n: int) -> int:
    # Count divisors via the prime factorisation: if n = p1^a1 * ... * pk^ak,
    # the divisor count is (a1 + 1) * ... * (ak + 1).
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    # First triangular number with more than 500 divisors (Project Euler 12).
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i

        if count_divisors(t_num) > 500:
            break

    return t_num


if __name__ == "__main__":
    print(solution())
390
1
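A brute-force cross-check for the divisor-counting helper in the record above (sketch, using the restored name count_divisors):

def count_divisors_naive(n: int) -> int:
    return sum(1 for d in range(1, n + 1) if n % d == 0)

assert count_divisors(28) == count_divisors_naive(28) == 6  # 1, 2, 4, 7, 14, 28
assert all(count_divisors(n) == count_divisors_naive(n) for n in range(1, 200))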
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phases = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phases, -2 * pi))
    plt.show()
295
from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCAmelCase : Any = logging.get_logger(__name__) _UpperCAmelCase : Union[str, Any] = { """snap-research/efficientformer-l1-300""": ( """https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json""" ), } class lowerCAmelCase ( __UpperCamelCase ): UpperCAmelCase__ = """efficientformer""" def __init__( self : Dict , UpperCAmelCase : List[int] = [3, 2, 6, 4] , UpperCAmelCase : List[int] = [48, 96, 224, 448] , UpperCAmelCase : List[bool] = [True, True, True, True] , UpperCAmelCase : int = 448 , UpperCAmelCase : int = 32 , UpperCAmelCase : int = 4 , UpperCAmelCase : int = 7 , UpperCAmelCase : int = 5 , UpperCAmelCase : int = 8 , UpperCAmelCase : int = 4 , UpperCAmelCase : float = 0.0 , UpperCAmelCase : int = 16 , UpperCAmelCase : int = 3 , UpperCAmelCase : int = 3 , UpperCAmelCase : int = 3 , UpperCAmelCase : int = 2 , UpperCAmelCase : int = 1 , UpperCAmelCase : float = 0.0 , UpperCAmelCase : int = 1 , UpperCAmelCase : bool = True , UpperCAmelCase : bool = True , UpperCAmelCase : float = 1e-5 , UpperCAmelCase : str = "gelu" , UpperCAmelCase : float = 0.0_2 , UpperCAmelCase : float = 1e-12 , UpperCAmelCase : int = 224 , UpperCAmelCase : float = 1e-05 , **UpperCAmelCase : Tuple , ) -> None: super().__init__(**UpperCAmelCase ) lowerCamelCase__ : Optional[Any] = hidden_act lowerCamelCase__ : int = hidden_dropout_prob lowerCamelCase__ : List[Any] = hidden_sizes lowerCamelCase__ : int = num_hidden_layers lowerCamelCase__ : int = num_attention_heads lowerCamelCase__ : Dict = initializer_range lowerCamelCase__ : Optional[Any] = layer_norm_eps lowerCamelCase__ : int = patch_size lowerCamelCase__ : List[Any] = num_channels lowerCamelCase__ : List[str] = depths lowerCamelCase__ : Any = mlp_expansion_ratio lowerCamelCase__ : Any = downsamples lowerCamelCase__ : str = dim lowerCamelCase__ : Tuple = key_dim lowerCamelCase__ : int = attention_ratio lowerCamelCase__ : int = resolution lowerCamelCase__ : Dict = pool_size lowerCamelCase__ : List[str] = downsample_patch_size lowerCamelCase__ : Tuple = downsample_stride lowerCamelCase__ : int = downsample_pad lowerCamelCase__ : Optional[int] = drop_path_rate lowerCamelCase__ : Optional[Any] = num_metaad_blocks lowerCamelCase__ : Any = distillation lowerCamelCase__ : Optional[int] = use_layer_scale lowerCamelCase__ : Union[str, Any] = layer_scale_init_value lowerCamelCase__ : Optional[int] = image_size lowerCamelCase__ : Optional[Any] = batch_norm_eps
295
1
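A trivial implementation of the FilterType protocol from the record above, useful for eyeballing the plotting helpers (sketch with the restored names; an identity filter passes the impulse through unchanged, so its magnitude response should plot flat at 0 dB):

class AllPassFilter:
    def process(self, sample: float) -> float:
        return sample  # identity: impulse in, impulse out

show_frequency_response(AllPassFilter(), samplerate=48000)
show_phase_response(AllPassFilter(), samplerate=48000)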
'''simple docstring''' from typing import Dict, Optional import numpy as np import datasets _UpperCAmelCase : int = """\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n""" _UpperCAmelCase : Tuple = """\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. 
])}\n""" _UpperCAmelCase : int = """\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}""" def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = False, ): if label_map is not None: for old_id, new_id in label_map.items(): __lowerCAmelCase = new_id # turn into Numpy arrays __lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE) __lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE) if reduce_labels: __lowerCAmelCase = 2_5_5 __lowerCAmelCase = label - 1 __lowerCAmelCase = 2_5_5 __lowerCAmelCase = label != ignore_index __lowerCAmelCase = np.not_equal(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE) __lowerCAmelCase = pred_label[mask] __lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE)[mask] __lowerCAmelCase = pred_label[pred_label == label] __lowerCAmelCase = np.histogram(__SCREAMING_SNAKE_CASE, bins=__SCREAMING_SNAKE_CASE, range=(0, num_labels - 1))[0] __lowerCAmelCase = np.histogram(__SCREAMING_SNAKE_CASE, bins=__SCREAMING_SNAKE_CASE, range=(0, num_labels - 1))[0] __lowerCAmelCase = np.histogram(__SCREAMING_SNAKE_CASE, bins=__SCREAMING_SNAKE_CASE, range=(0, num_labels - 1))[0] __lowerCAmelCase = area_pred_label + area_label - area_intersect return area_intersect, area_union, area_pred_label, area_label def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = False, ): __lowerCAmelCase = np.zeros((num_labels,), dtype=np.floataa) __lowerCAmelCase = np.zeros((num_labels,), dtype=np.floataa) __lowerCAmelCase = np.zeros((num_labels,), dtype=np.floataa) __lowerCAmelCase = np.zeros((num_labels,), dtype=np.floataa) for result, gt_seg_map in zip(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE): __lowerCAmelCase = intersect_and_union( __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE) total_area_intersect += area_intersect total_area_union += area_union total_area_pred_label += area_pred_label total_area_label += area_label return total_area_intersect, total_area_union, total_area_pred_label, total_area_label def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = False, ): __lowerCAmelCase = total_intersect_and_union( __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE) # compute metrics __lowerCAmelCase = {} __lowerCAmelCase = total_area_intersect.sum() / total_area_label.sum() __lowerCAmelCase = total_area_intersect / total_area_union __lowerCAmelCase = total_area_intersect / total_area_label __lowerCAmelCase = np.nanmean(__SCREAMING_SNAKE_CASE) __lowerCAmelCase = np.nanmean(__SCREAMING_SNAKE_CASE) __lowerCAmelCase = all_acc __lowerCAmelCase = iou __lowerCAmelCase = acc if nan_to_num is not None: __lowerCAmelCase = {metric: np.nan_to_num(__SCREAMING_SNAKE_CASE, nan=__SCREAMING_SNAKE_CASE) for metric, metric_value in metrics.items()} return metrics @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a__ ( datasets.Metric ): """simple docstring""" def _snake_case (self ): return datasets.MetricInfo( 
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( # 1st Seq - height dim, 2nd - width dim { '''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ), '''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ), } ) , reference_urls=[ '''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py''' ] , ) def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = None , __lowercase = None , __lowercase = False , ): __lowerCAmelCase = mean_iou( results=lowercase_ , gt_seg_maps=lowercase_ , num_labels=lowercase_ , ignore_index=lowercase_ , nan_to_num=lowercase_ , label_map=lowercase_ , reduce_labels=lowercase_ , ) return iou_result
704
'''simple docstring''' import itertools import random import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin _UpperCAmelCase : Tuple = random.Random() if is_torch_available(): import torch def __magic_name__( lowerCamelCase, lowerCamelCase=1.0, lowerCamelCase=None, lowerCamelCase=None): if rng is None: __lowerCAmelCase = global_rng __lowerCAmelCase = [] for batch_idx in range(shape[0]): values.append([]) for _ in range(shape[1]): values[-1].append(rng.random() * scale) return values class a__ ( unittest.TestCase ): """simple docstring""" def __init__(self , __lowercase , __lowercase=7 , __lowercase=4_00 , __lowercase=20_00 , __lowercase=1 , __lowercase=0.0 , __lowercase=1_60_00 , __lowercase=True , __lowercase=True , ): __lowerCAmelCase = parent __lowerCAmelCase = batch_size __lowerCAmelCase = min_seq_length __lowerCAmelCase = max_seq_length __lowerCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) __lowerCAmelCase = feature_size __lowerCAmelCase = padding_value __lowerCAmelCase = sampling_rate __lowerCAmelCase = return_attention_mask __lowerCAmelCase = do_normalize def _snake_case (self ): return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def _snake_case (self , __lowercase=False , __lowercase=False ): def _flatten(__lowercase ): return list(itertools.chain(*__lowercase ) ) if equal_length: __lowerCAmelCase = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size __lowerCAmelCase = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: __lowerCAmelCase = [np.asarray(__lowercase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Union[str, Any] = ASTFeatureExtractor def _snake_case (self ): __lowerCAmelCase = ASTFeatureExtractionTester(self ) def _snake_case (self ): # Tests that all call wrap to encode_plus and batch_encode_plus __lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 __lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] __lowerCAmelCase = [np.asarray(__lowercase ) for speech_input in speech_inputs] # Test not batched input __lowerCAmelCase = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values __lowerCAmelCase = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values self.assertTrue(np.allclose(__lowercase , __lowercase , atol=1e-3 ) ) # Test batched __lowerCAmelCase = feat_extract(__lowercase , padding=__lowercase , return_tensors='''np''' ).input_values __lowerCAmelCase = feat_extract(__lowercase , padding=__lowercase , return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(__lowercase , __lowercase ): self.assertTrue(np.allclose(__lowercase , __lowercase , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. 
__lowerCAmelCase = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] __lowerCAmelCase = np.asarray(__lowercase ) __lowerCAmelCase = feat_extract(__lowercase , return_tensors='''np''' ).input_values __lowerCAmelCase = feat_extract(__lowercase , return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(__lowercase , __lowercase ): self.assertTrue(np.allclose(__lowercase , __lowercase , atol=1e-3 ) ) @require_torch def _snake_case (self ): import torch __lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __lowerCAmelCase = np.random.rand(1_00 ).astype(np.floataa ) __lowerCAmelCase = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: __lowerCAmelCase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) __lowerCAmelCase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def _snake_case (self , __lowercase ): from datasets import load_dataset __lowerCAmelCase = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' ) # automatic decoding with librispeech __lowerCAmelCase = ds.sort('''id''' ).select(range(__lowercase ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] @require_torch def _snake_case (self ): # fmt: off __lowerCAmelCase = torch.tensor( [-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6, -1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3, -1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6, -0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] ) # fmt: on __lowerCAmelCase = self._load_datasamples(1 ) __lowerCAmelCase = ASTFeatureExtractor() __lowerCAmelCase = feature_extractor(__lowercase , return_tensors='''pt''' ).input_values self.assertEquals(input_values.shape , (1, 10_24, 1_28) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , __lowercase , atol=1e-4 ) )
474
0
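A toy worked example of the per-class intersection-over-union computed by the metric code in the record above (plain numpy, no assumptions beyond the record's own definitions):

import numpy as np

pred = np.array([[0, 1], [1, 1]])
label = np.array([[0, 1], [0, 1]])
intersect = np.sum((pred == 1) & (label == 1))  # 2 pixels
union = np.sum(pred == 1) + np.sum(label == 1) - intersect  # 3 pixels
print(intersect / union)  # IoU for class 1 = 2/3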
from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    # Count tile totals t <= t_limit that form between 1 and 10 distinct
    # square laminae (outer square minus a centred square hole).
    count: defaultdict = defaultdict(int)

    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1

        # The hole must have the same parity as the outer square.
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= 10)


if __name__ == "__main__":
    print(f"{solution() = }")
12
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path import torch from ...utils import is_npu_available, is_xpu_available from .config_args import ClusterConfig, default_json_config_file from .config_utils import SubcommandHelpFormatter lowerCamelCase__ : Any = """Create a default config file for Accelerate with only a few flags set.""" def UpperCamelCase ( lowercase_="no" , lowercase_ = default_json_config_file , lowercase_ = False ) -> Any: '''simple docstring''' lowercase__ : Any = Path(lowercase_ ) path.parent.mkdir(parents=lowercase_ , exist_ok=lowercase_ ) if path.exists(): print( F'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.' ) return False lowercase__ : int = mixed_precision.lower() if mixed_precision not in ["no", "fp16", "bf16", "fp8"]: raise ValueError( F'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}' ) lowercase__ : Dict = { """compute_environment""": """LOCAL_MACHINE""", """mixed_precision""": mixed_precision, } if torch.cuda.is_available(): lowercase__ : Any = torch.cuda.device_count() lowercase__ : Any = num_gpus lowercase__ : Optional[int] = False if num_gpus > 1: lowercase__ : Tuple = """MULTI_GPU""" else: lowercase__ : Optional[Any] = """NO""" elif is_xpu_available() and use_xpu: lowercase__ : Union[str, Any] = torch.xpu.device_count() lowercase__ : str = num_xpus lowercase__ : List[Any] = False if num_xpus > 1: lowercase__ : str = """MULTI_XPU""" else: lowercase__ : Optional[Any] = """NO""" elif is_npu_available(): lowercase__ : Tuple = torch.npu.device_count() lowercase__ : Union[str, Any] = num_npus lowercase__ : Union[str, Any] = False if num_npus > 1: lowercase__ : List[Any] = """MULTI_NPU""" else: lowercase__ : int = """NO""" else: lowercase__ : Union[str, Any] = 0 lowercase__ : str = True lowercase__ : Union[str, Any] = 1 lowercase__ : int = """NO""" lowercase__ : Tuple = ClusterConfig(**lowercase_ ) config.to_json_file(lowercase_ ) return path def UpperCamelCase ( lowercase_ , lowercase_ ) -> Optional[Any]: '''simple docstring''' lowercase__ : List[str] = parser.add_parser("""default""" , parents=lowercase_ , help=lowercase_ , formatter_class=lowercase_ ) parser.add_argument( """--config_file""" , default=lowercase_ , help=( """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """ """location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """ """such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """ """with 'huggingface'.""" ) , dest="""save_location""" , ) parser.add_argument( """--mixed_precision""" , choices=["""no""", """fp16""", """bf16"""] , type=lowercase_ , help="""Whether or not to use mixed precision training. """ """Choose between FP16 and BF16 (bfloat16) training. 
""" """BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.""" , default="""no""" , ) parser.set_defaults(func=lowercase_ ) return parser def UpperCamelCase ( lowercase_ ) -> Any: '''simple docstring''' lowercase__ : Optional[Any] = write_basic_config(args.mixed_precision , args.save_location ) if config_file: print(F'accelerate configuration saved at {config_file}' )
12
1
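A small brute-force cross-check for the lamina-counting solution in the record above (hypothetical helper; solution_naive is not part of the row and is only practical for small limits):

from collections import defaultdict

def solution_naive(t_limit: int = 1000) -> int:
    count: defaultdict = defaultdict(int)
    for outer in range(3, t_limit):
        for hole in range(outer - 2, 0, -2):  # hole keeps the outer square's parity
            tiles = outer * outer - hole * hole
            if tiles > t_limit:
                break  # shrinking the hole only adds more tiles
            count[tiles] += 1
    return sum(1 for n in count.values() if 1 <= n <= 10)

assert solution_naive(1000) == solution(t_limit=1000)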
from datetime import datetime

import requests


def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
308
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase : str = logging.get_logger(__name__) _lowerCamelCase : Optional[int] = { """microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""", } class lowercase ( SCREAMING_SNAKE_CASE_): '''simple docstring''' UpperCAmelCase : Optional[int] = 'git_vision_model' def __init__( self : Optional[Any] , snake_case : Any=768 , snake_case : List[str]=3072 , snake_case : Optional[Any]=12 , snake_case : Optional[Any]=12 , snake_case : Tuple=3 , snake_case : str=224 , snake_case : Tuple=16 , snake_case : Union[str, Any]="quick_gelu" , snake_case : Dict=1E-5 , snake_case : int=0.0 , snake_case : Union[str, Any]=0.02 , **snake_case : int , ): '''simple docstring''' super().__init__(**snake_case ) SCREAMING_SNAKE_CASE : Any = hidden_size SCREAMING_SNAKE_CASE : str = intermediate_size SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers SCREAMING_SNAKE_CASE : str = num_attention_heads SCREAMING_SNAKE_CASE : Dict = num_channels SCREAMING_SNAKE_CASE : Union[str, Any] = patch_size SCREAMING_SNAKE_CASE : Optional[Any] = image_size SCREAMING_SNAKE_CASE : Dict = initializer_range SCREAMING_SNAKE_CASE : List[Any] = attention_dropout SCREAMING_SNAKE_CASE : Tuple = layer_norm_eps SCREAMING_SNAKE_CASE : str = hidden_act @classmethod def lowerCamelCase_ ( cls : Optional[int] , snake_case : Union[str, os.PathLike] , **snake_case : List[Any] ): '''simple docstring''' cls._set_token_in_kwargs(snake_case ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = cls.get_config_dict(snake_case , **snake_case ) # get the vision config dict if we are loading from GITConfig if config_dict.get('model_type' ) == "git": SCREAMING_SNAKE_CASE : Optional[int] = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(snake_case , **snake_case ) class lowercase ( SCREAMING_SNAKE_CASE_): '''simple docstring''' UpperCAmelCase : int = 'git' def __init__( self : Union[str, Any] , snake_case : str=None , snake_case : List[str]=30522 , snake_case : Optional[Any]=768 , snake_case : Optional[Any]=6 , snake_case : Union[str, Any]=12 , snake_case : Union[str, Any]=3072 , snake_case : Dict="gelu" , snake_case : Optional[Any]=0.1 , snake_case : Optional[Any]=0.1 , snake_case : str=1024 , snake_case : Tuple=0.02 , snake_case : Dict=1E-12 , snake_case : List[str]=0 , snake_case : Optional[int]="absolute" , snake_case : Optional[int]=True , snake_case : Optional[int]=False , snake_case : Optional[Any]=101 , snake_case : Optional[int]=102 , snake_case : int=None , **snake_case : Any , ): '''simple docstring''' super().__init__(bos_token_id=snake_case , eos_token_id=snake_case , pad_token_id=snake_case , **snake_case ) if vision_config is None: SCREAMING_SNAKE_CASE : List[Any] = {} logger.info('vision_config is None. initializing the GitVisionConfig with default values.' 
) SCREAMING_SNAKE_CASE : Union[str, Any] = GitVisionConfig(**snake_case ) SCREAMING_SNAKE_CASE : Optional[int] = vocab_size SCREAMING_SNAKE_CASE : List[Any] = hidden_size SCREAMING_SNAKE_CASE : Any = num_hidden_layers SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads SCREAMING_SNAKE_CASE : Tuple = hidden_act SCREAMING_SNAKE_CASE : Tuple = intermediate_size SCREAMING_SNAKE_CASE : List[str] = hidden_dropout_prob SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings SCREAMING_SNAKE_CASE : str = initializer_range SCREAMING_SNAKE_CASE : Dict = layer_norm_eps SCREAMING_SNAKE_CASE : Any = position_embedding_type SCREAMING_SNAKE_CASE : Any = use_cache SCREAMING_SNAKE_CASE : int = tie_word_embeddings SCREAMING_SNAKE_CASE : Optional[int] = num_image_with_embedding SCREAMING_SNAKE_CASE : Tuple = bos_token_id SCREAMING_SNAKE_CASE : Union[str, Any] = eos_token_id def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE : int = self.vision_config.to_dict() SCREAMING_SNAKE_CASE : Dict = self.__class__.model_type return output
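A usage sketch of the two config classes above (assuming a transformers release that exports them; the printed values follow the defaults shown in the snippet):

from transformers import GitConfig, GitVisionConfig

config = GitConfig()                     # nested GitVisionConfig built with defaults
print(config.vision_config.hidden_size)  # 768
print(config.to_dict()["model_type"])    # "git", set by the to_dict override above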
308
1
'''simple docstring''' from __future__ import annotations from collections.abc import Sequence from typing import Literal def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): '''simple docstring''' _snake_case = list(SCREAMING_SNAKE_CASE__ ) _snake_case = list(SCREAMING_SNAKE_CASE__ ) _snake_case = 0 for i in range(len(SCREAMING_SNAKE_CASE__ ) ): if lista[i] != lista[i]: count += 1 _snake_case = "_" if count > 1: return False else: return "".join(SCREAMING_SNAKE_CASE__ ) def snake_case_ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' _snake_case = [] while True: _snake_case = ["$"] * len(SCREAMING_SNAKE_CASE__ ) _snake_case = [] for i in range(len(SCREAMING_SNAKE_CASE__ ) ): for j in range(i + 1 , len(SCREAMING_SNAKE_CASE__ ) ): _snake_case = compare_string(binary[i] , binary[j] ) if k is False: _snake_case = "*" _snake_case = "*" temp.append("X" ) for i in range(len(SCREAMING_SNAKE_CASE__ ) ): if checka[i] == "$": pi.append(binary[i] ) if len(SCREAMING_SNAKE_CASE__ ) == 0: return pi _snake_case = list(set(SCREAMING_SNAKE_CASE__ ) ) def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): '''simple docstring''' _snake_case = [] for minterm in minterms: _snake_case = "" for _ in range(SCREAMING_SNAKE_CASE__ ): _snake_case = str(minterm % 2 ) + string minterm //= 2 temp.append(SCREAMING_SNAKE_CASE__ ) return temp def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): '''simple docstring''' _snake_case = list(SCREAMING_SNAKE_CASE__ ) _snake_case = list(SCREAMING_SNAKE_CASE__ ) _snake_case = 0 for i in range(len(SCREAMING_SNAKE_CASE__ ) ): if lista[i] != lista[i]: count_n += 1 return count_n == count def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): '''simple docstring''' _snake_case = [] _snake_case = [0] * len(SCREAMING_SNAKE_CASE__ ) for i in range(len(chart[0] ) ): _snake_case = 0 _snake_case = -1 for j in range(len(SCREAMING_SNAKE_CASE__ ) ): if chart[j][i] == 1: count += 1 _snake_case = j if count == 1: _snake_case = 1 for i in range(len(SCREAMING_SNAKE_CASE__ ) ): if select[i] == 1: for j in range(len(chart[0] ) ): if chart[i][j] == 1: for k in range(len(SCREAMING_SNAKE_CASE__ ) ): _snake_case = 0 temp.append(prime_implicants[i] ) while True: _snake_case = 0 _snake_case = -1 _snake_case = 0 for i in range(len(SCREAMING_SNAKE_CASE__ ) ): _snake_case = chart[i].count(1 ) if count_n > max_n: _snake_case = count_n _snake_case = i if max_n == 0: return temp temp.append(prime_implicants[rem] ) for i in range(len(chart[0] ) ): if chart[rem][i] == 1: for j in range(len(SCREAMING_SNAKE_CASE__ ) ): _snake_case = 0 def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): '''simple docstring''' _snake_case = [[0 for x in range(len(SCREAMING_SNAKE_CASE__ ) )] for x in range(len(SCREAMING_SNAKE_CASE__ ) )] for i in range(len(SCREAMING_SNAKE_CASE__ ) ): _snake_case = prime_implicants[i].count("_" ) for j in range(len(SCREAMING_SNAKE_CASE__ ) ): if is_for_table(prime_implicants[i] , binary[j] , SCREAMING_SNAKE_CASE__ ): _snake_case = 1 return chart def snake_case_ ( ): '''simple docstring''' _snake_case = int(input("Enter the no. 
of variables\n" ) ) _snake_case = [ float(SCREAMING_SNAKE_CASE__ ) for x in input( "Enter the decimal representation of Minterms 'Spaces Separated'\n" ).split() ] _snake_case = decimal_to_binary(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) _snake_case = check(SCREAMING_SNAKE_CASE__ ) print("Prime Implicants are:" ) print(SCREAMING_SNAKE_CASE__ ) _snake_case = prime_implicant_chart(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) _snake_case = selection(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) print("Essential Prime Implicants are:" ) print(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": import doctest doctest.testmod() main()
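The heart of the method is the one-bit merge rule used by compare_string: two implicants combine when they differ in exactly one position, and that position becomes a don't-care. A standalone sketch (merge_implicants is a hypothetical helper name):

from __future__ import annotations

def merge_implicants(a: str, b: str) -> str | None:
    # Combine two implicants that differ in exactly one bit position.
    diffs = [i for i in range(len(a)) if a[i] != b[i]]
    if len(diffs) != 1:
        return None
    i = diffs[0]
    return a[:i] + "_" + a[i + 1 :]

assert merge_implicants("0110", "0100") == "01_0"
assert merge_implicants("0110", "1001") is None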
672
'''simple docstring''' from transformers import DistilBertTokenizer, DistilBertTokenizerFast from transformers.testing_utils import require_tokenizers, slow from ..bert.test_tokenization_bert import BertTokenizationTest @require_tokenizers class DistilBertTokenizationTest( BertTokenizationTest ): '''simple docstring''' tokenizer_class = DistilBertTokenizer rust_tokenizer_class = DistilBertTokenizerFast test_rust_tokenizer = True @slow def test_sequence_builders( self ): tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased" ) text = tokenizer.encode("sequence builders" , add_special_tokens=False ) text_a = tokenizer.encode("multi-sequence build" , add_special_tokens=False ) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text ) encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ]
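For reference, the [CLS]/[SEP] layouts those assertions encode, written out with placeholder IDs (101 and 102 are the BERT-vocabulary special-token IDs that distilbert-base-uncased reuses; the content IDs are hypothetical):

CLS, SEP = 101, 102
text = [2023, 2003]    # placeholder content token IDs
text_2 = [2178, 6251]

single = [CLS] + text + [SEP]
pair = [CLS] + text + [SEP] + text_2 + [SEP]
assert single == [101, 2023, 2003, 102]
assert pair == [101, 2023, 2003, 102, 2178, 6251, 102]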
672
1
'''simple docstring''' import sys from collections import defaultdict class __a : def __init__( self : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [] def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : List[Any] ): '''simple docstring''' return self.node_position[vertex] def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : str ,lowerCamelCase : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = pos def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : Any ): '''simple docstring''' if start > size // 2 - 1: return else: if 2 * start + 2 >= size: __SCREAMING_SNAKE_CASE = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: __SCREAMING_SNAKE_CASE = 2 * start + 1 else: __SCREAMING_SNAKE_CASE = 2 * start + 2 if heap[smallest_child] < heap[start]: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = heap[smallest_child], positions[smallest_child] __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = ( heap[start], positions[start], ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = temp, tempa __SCREAMING_SNAKE_CASE = self.get_position(positions[smallest_child] ) self.set_position( positions[smallest_child] ,self.get_position(positions[start] ) ) self.set_position(positions[start] ,lowerCamelCase ) self.top_to_bottom(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,lowerCamelCase ) def UpperCAmelCase__ ( self : Any ,lowerCamelCase : int ,lowerCamelCase : List[str] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = position[index] while index != 0: __SCREAMING_SNAKE_CASE = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 ) if val < heap[parent]: __SCREAMING_SNAKE_CASE = heap[parent] __SCREAMING_SNAKE_CASE = position[parent] self.set_position(position[parent] ,lowerCamelCase ) else: __SCREAMING_SNAKE_CASE = val __SCREAMING_SNAKE_CASE = temp self.set_position(lowerCamelCase ,lowerCamelCase ) break __SCREAMING_SNAKE_CASE = parent else: __SCREAMING_SNAKE_CASE = val __SCREAMING_SNAKE_CASE = temp self.set_position(lowerCamelCase ,0 ) def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : List[Any] ,lowerCamelCase : List[str] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = len(lowerCamelCase ) // 2 - 1 for i in range(lowerCamelCase ,-1 ,-1 ): self.top_to_bottom(lowerCamelCase ,lowerCamelCase ,len(lowerCamelCase ) ,lowerCamelCase ) def UpperCAmelCase__ ( self : int ,lowerCamelCase : Optional[int] ,lowerCamelCase : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = positions[0] __SCREAMING_SNAKE_CASE = sys.maxsize self.top_to_bottom(lowerCamelCase ,0 ,len(lowerCamelCase ) ,lowerCamelCase ) return temp def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' __SCREAMING_SNAKE_CASE = Heap() __SCREAMING_SNAKE_CASE = [0] * len(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = [-1] * len(__UpperCAmelCase ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph __SCREAMING_SNAKE_CASE = [] # Heap of Distance of vertices from their neighboring vertex __SCREAMING_SNAKE_CASE = [] for vertex in range(len(__UpperCAmelCase ) ): distance_tv.append(sys.maxsize ) positions.append(__UpperCAmelCase ) heap.node_position.append(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = sys.maxsize for neighbor, distance in adjacency_list[0]: __SCREAMING_SNAKE_CASE = 0 
__SCREAMING_SNAKE_CASE = distance heap.heapify(__UpperCAmelCase , __UpperCAmelCase ) for _ in range(1 , len(__UpperCAmelCase ) ): __SCREAMING_SNAKE_CASE = heap.delete_minimum(__UpperCAmelCase , __UpperCAmelCase ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) __SCREAMING_SNAKE_CASE = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(__UpperCAmelCase )] ): __SCREAMING_SNAKE_CASE = distance heap.bottom_to_top( __UpperCAmelCase , heap.get_position(__UpperCAmelCase ) , __UpperCAmelCase , __UpperCAmelCase ) __SCREAMING_SNAKE_CASE = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > a = int(input("Enter number of edges: ").strip()) a = defaultdict(list) for _ in range(edges_number): a = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
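A self-contained sketch of the adjacency-list format the script reads interactively (the four-edge graph is arbitrary):

from collections import defaultdict

adjacency_list = defaultdict(list)
for u, v, w in [(0, 1, 1), (1, 2, 2), (2, 3, 1), (0, 3, 4)]:
    # Undirected edge: store a [neighbor, weight] pair on both endpoints.
    adjacency_list[u].append([v, w])
    adjacency_list[v].append([u, w])
# A minimum spanning tree here uses edges (0,1), (1,2), (2,3), total weight 4.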
13
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ "SEW_PRETRAINED_MODEL_ARCHIVE_LIST", "SEWForCTC", "SEWForSequenceClassification", "SEWModel", "SEWPreTrainedModel", ] if TYPE_CHECKING: from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_sew import ( SEW_PRETRAINED_MODEL_ARCHIVE_LIST, SEWForCTC, SEWForSequenceClassification, SEWModel, SEWPreTrainedModel, ) else: import sys a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
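A usage sketch of the lazy module above (assuming torch is installed, since the SEW model classes are torch-only; both names resolve on first attribute access through _LazyModule):

from transformers import SEWConfig, SEWModel

config = SEWConfig()      # default hyperparameters, no weights downloaded
model = SEWModel(config)  # randomly initialised SEW encoder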
13
1
def euclidean_distance_sqr( point1 , point2 ) -> float: '''simple docstring''' return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2 def column_based_sort( array , column=0 ) -> list: '''simple docstring''' return sorted(array , key=lambda x : x[column] ) def dis_between_closest_pair( points , points_counts , min_dis=float("inf" ) ) -> float: '''simple docstring''' for i in range(points_counts - 1 ): for j in range(i + 1 , points_counts ): current_dis = euclidean_distance_sqr(points[i] , points[j] ) if current_dis < min_dis: min_dis = current_dis return min_dis def dis_between_closest_in_strip( points , points_counts , min_dis=float("inf" ) ) -> float: '''simple docstring''' for i in range(min(6 , points_counts - 1 ) , points_counts ): for j in range(max(0 , i - 6 ) , i ): current_dis = euclidean_distance_sqr(points[i] , points[j] ) if current_dis < min_dis: min_dis = current_dis return min_dis def closest_pair_of_points_sqr( points_sorted_on_x , points_sorted_on_y , points_counts ) -> float: '''simple docstring''' if points_counts <= 3: return dis_between_closest_pair(points_sorted_on_x , points_counts ) # recursion mid = points_counts // 2 closest_in_left = closest_pair_of_points_sqr( points_sorted_on_x , points_sorted_on_y[:mid] , mid ) closest_in_right = closest_pair_of_points_sqr( points_sorted_on_x , points_sorted_on_y[mid:] , points_counts - mid ) closest_pair_dis = min(closest_in_left , closest_in_right ) cross_strip = [] for point in points_sorted_on_x: if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis: cross_strip.append(point ) closest_in_strip = dis_between_closest_in_strip( cross_strip , len(cross_strip ) , closest_pair_dis ) return min(closest_pair_dis , closest_in_strip ) def closest_pair_of_points( points , points_counts ) -> float: '''simple docstring''' points_sorted_on_x = column_based_sort(points , column=0 ) points_sorted_on_y = column_based_sort(points , column=1 ) return ( closest_pair_of_points_sqr( points_sorted_on_x , points_sorted_on_y , points_counts ) ) ** 0.5 if __name__ == "__main__": points = [(2, 3), (1_2, 3_0), (4_0, 5_0), (5, 1), (1_2, 1_0), (3, 4)] print("""Distance:""", closest_pair_of_points(points, len(points)))
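A quick cross-check of the O(n log n) routine against the O(n^2) helper above (the random points are arbitrary; the two distances should agree):

import random

random.seed(0)
pts = [(random.uniform(0, 100), random.uniform(0, 100)) for _ in range(200)]

fast = closest_pair_of_points(pts, len(pts))            # divide and conquer
brute = dis_between_closest_pair(pts, len(pts)) ** 0.5  # exhaustive reference
print(f"divide-and-conquer: {fast:.6f}  brute force: {brute:.6f}")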
606
import copy import tempfile import unittest from transformers import MaMaaaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder def __magic_name__ ( lowercase_ , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , ) -> Union[str, Any]: '''simple docstring''' if attention_mask is None: UpperCamelCase = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: UpperCamelCase = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: UpperCamelCase = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=lowercase_ ) if decoder_head_mask is None: UpperCamelCase = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=lowercase_ ) if cross_attn_head_mask is None: UpperCamelCase = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=lowercase_ ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class __UpperCAmelCase : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE="relu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=20 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=0 , ) -> Optional[int]: """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = seq_length UpperCamelCase = is_training UpperCamelCase = use_labels UpperCamelCase = vocab_size UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = encoder_layerdrop UpperCamelCase = decoder_layerdrop UpperCamelCase = max_position_embeddings UpperCamelCase = eos_token_id UpperCamelCase = pad_token_id UpperCamelCase = bos_token_id def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase = self.eos_token_id # Eos Token UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for M2M100 the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way 
of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input UpperCamelCase = input_ids.clamp(self.pad_token_id + 1 ) UpperCamelCase = decoder_input_ids.clamp(self.pad_token_id + 1 ) UpperCamelCase = self.get_config() UpperCamelCase = prepare_mam_aaa_inputs_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return config, inputs_dict def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" return MaMaaaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase , UpperCamelCase = self.prepare_config_and_inputs() return config, inputs_dict def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" UpperCamelCase = MaMaaaModel(config=SCREAMING_SNAKE_CASE ).get_decoder().to(SCREAMING_SNAKE_CASE ).eval() UpperCamelCase = inputs_dict["input_ids"] UpperCamelCase = inputs_dict["attention_mask"] UpperCamelCase = inputs_dict["head_mask"] # first forward pass UpperCamelCase = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , head_mask=SCREAMING_SNAKE_CASE , use_cache=SCREAMING_SNAKE_CASE ) UpperCamelCase , UpperCamelCase = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCamelCase = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and UpperCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCamelCase = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) UpperCamelCase = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE )["last_hidden_state"] UpperCamelCase = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , past_key_values=SCREAMING_SNAKE_CASE )[ "last_hidden_state" ] # select random slice UpperCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCamelCase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-2 ) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" UpperCamelCase = MaMaaaModel(config=SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ).eval() UpperCamelCase = model(**SCREAMING_SNAKE_CASE ) UpperCamelCase = outputs.encoder_last_hidden_state UpperCamelCase = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase = model.get_encoder() encoder.save_pretrained(SCREAMING_SNAKE_CASE ) UpperCamelCase = 
MaMaaaEncoder.from_pretrained(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ) UpperCamelCase = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase = model.get_decoder() decoder.save_pretrained(SCREAMING_SNAKE_CASE ) UpperCamelCase = MaMaaaDecoder.from_pretrained(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ) UpperCamelCase = decoder( input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=inputs_dict["attention_mask"] , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 ) @require_torch class __UpperCAmelCase ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ): """simple docstring""" lowercase = ( ( MaMaaaModel, MaMaaaForConditionalGeneration, ) if is_torch_available() else () ) lowercase = (MaMaaaForConditionalGeneration,) if is_torch_available() else () lowercase = ( { """conversational""": MaMaaaForConditionalGeneration, """feature-extraction""": MaMaaaModel, """summarization""": MaMaaaForConditionalGeneration, """text2text-generation""": MaMaaaForConditionalGeneration, """translation""": MaMaaaForConditionalGeneration, } if is_torch_available() else {} ) lowercase = True lowercase = True lowercase = False lowercase = False def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" if pipeline_test_casse_name == "TranslationPipelineTests": # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`. # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer. 
return True return False def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase = MaMaaaModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" self.config_tester.run_common_tests() def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: UpperCamelCase = model_class(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(SCREAMING_SNAKE_CASE ) UpperCamelCase , UpperCamelCase = model_class.from_pretrained(SCREAMING_SNAKE_CASE , output_loading_info=SCREAMING_SNAKE_CASE ) self.assertEqual(info["missing_keys"] , [] ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration): UpperCamelCase = model_class(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase = copy.deepcopy(self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) if not self.is_encoder_decoder: UpperCamelCase = inputs["input_ids"] del inputs["input_ids"] else: UpperCamelCase = inputs["input_ids"] UpperCamelCase = inputs.get("decoder_input_ids" , SCREAMING_SNAKE_CASE ) del inputs["input_ids"] inputs.pop("decoder_input_ids" , SCREAMING_SNAKE_CASE ) UpperCamelCase = model.get_input_embeddings() if not self.is_encoder_decoder: UpperCamelCase = wte(SCREAMING_SNAKE_CASE ) else: UpperCamelCase = wte(SCREAMING_SNAKE_CASE ) UpperCamelCase = wte(SCREAMING_SNAKE_CASE ) with torch.no_grad(): model(**SCREAMING_SNAKE_CASE )[0] def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs() UpperCamelCase = input_dict["input_ids"] UpperCamelCase = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE ) UpperCamelCase = MaMaaaForConditionalGeneration(SCREAMING_SNAKE_CASE ).eval().to(SCREAMING_SNAKE_CASE ) if torch_device == "cuda": model.half() model.generate(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE ) model.generate(num_beams=4 , do_sample=SCREAMING_SNAKE_CASE , early_stopping=SCREAMING_SNAKE_CASE , num_return_sequences=3 ) def __magic_name__ ( lowercase_ ) -> int: '''simple docstring''' return torch.tensor(lowercase_ , dtype=torch.long , device=lowercase_ ) __a : List[str] = 1E-4 @require_torch @require_sentencepiece @require_tokenizers @slow class __UpperCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def __lowerCAmelCase ( self ) -> str: """simple docstring""" return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" UpperCamelCase = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(SCREAMING_SNAKE_CASE ) UpperCamelCase = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]] ) UpperCamelCase = 
_long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]] ) UpperCamelCase = prepare_mam_aaa_inputs_dict(model.config , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) with torch.no_grad(): UpperCamelCase = model(**SCREAMING_SNAKE_CASE )[0] UpperCamelCase = torch.Size((1, 11, 1024) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE ) # change to expected output here UpperCamelCase = torch.tensor( [[-0.7_780, -0.1_676, 0.1_038], [-6.7_556, -1.3_992, 0.0_567], [-7.5_383, -0.5_920, -0.2_779]] , device=SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE ) ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(SCREAMING_SNAKE_CASE ) # change to intended input UpperCamelCase = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]] ) UpperCamelCase = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]] ) UpperCamelCase = prepare_mam_aaa_inputs_dict(model.config , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) with torch.no_grad(): UpperCamelCase = model(**SCREAMING_SNAKE_CASE )[0] UpperCamelCase = torch.Size((1, 11, model.config.vocab_size) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE ) # change to expected output here UpperCamelCase = torch.tensor( [[-1.0_448, -1.0_411, 3.7_992], [-3.2_191, -3.2_386, -1.3_451], [-3.6_210, -3.5_993, 0.4_925]] , device=SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE ) ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(SCREAMING_SNAKE_CASE ) UpperCamelCase = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" ) UpperCamelCase = [ "L'affaire NSA souligne l'absence totale de débat sur le renseignement", "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.", "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent" " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de" " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.", ] # The below article tests that we don't add any hypotheses outside of the top n_beams UpperCamelCase = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , return_tensors="pt" ) UpperCamelCase = model.generate( input_ids=dct["input_ids"].to(SCREAMING_SNAKE_CASE ) , attention_mask=dct["attention_mask"].to(SCREAMING_SNAKE_CASE ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , ) UpperCamelCase = [ "The NSA case highlights the total absence of intelligence debate", "I think there are two levels of response from the French government.", "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S." " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all" " communications in France.", ] UpperCamelCase = tokenizer.batch_decode( hypotheses_batch.tolist() , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE ) assert generated == expected_en
606
1
'''simple docstring''' import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Dataset, SCREAMING_SNAKE_CASE__ : Dict[str, str] ) -> Optional[Any]: UpperCAmelCase_ : Dict = args.log_outputs UpperCAmelCase_ : Optional[int] = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] ) # load metric UpperCAmelCase_ : List[str] = load_metric('''wer''' ) UpperCAmelCase_ : Union[str, Any] = load_metric('''cer''' ) # compute metrics UpperCAmelCase_ : Any = wer.compute(references=result['''target'''], predictions=result['''prediction'''] ) UpperCAmelCase_ : Dict = cer.compute(references=result['''target'''], predictions=result['''prediction'''] ) # print & log results UpperCAmelCase_ : str = F"""WER: {wer_result}\nCER: {cer_result}""" print(SCREAMING_SNAKE_CASE__ ) with open(F"""{dataset_id}_eval_results.txt""", '''w''' ) as f: f.write(SCREAMING_SNAKE_CASE__ ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: UpperCAmelCase_ : List[Any] = F"""log_{dataset_id}_predictions.txt""" UpperCAmelCase_ : Optional[int] = F"""log_{dataset_id}_targets.txt""" with open(SCREAMING_SNAKE_CASE__, '''w''' ) as p, open(SCREAMING_SNAKE_CASE__, '''w''' ) as t: # mapping function to write output def write_to_file(SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : Tuple ): p.write(F"""{i}""" + '''\n''' ) p.write(batch['''prediction'''] + '''\n''' ) t.write(F"""{i}""" + '''\n''' ) t.write(batch['''target'''] + '''\n''' ) result.map(SCREAMING_SNAKE_CASE__, with_indices=SCREAMING_SNAKE_CASE__ ) def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : str ) -> str: UpperCAmelCase_ : str = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training UpperCAmelCase_ : Union[str, Any] = re.sub(SCREAMING_SNAKE_CASE__, '''''', text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! 
UpperCAmelCase_ : Union[str, Any] = ['''\n\n''', '''\n''', ''' ''', ''' '''] for t in token_sequences_to_ignore: UpperCAmelCase_ : Optional[Any] = ''' '''.join(text.split(SCREAMING_SNAKE_CASE__ ) ) return text def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[int]: # load dataset UpperCAmelCase_ : int = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=SCREAMING_SNAKE_CASE__ ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor UpperCAmelCase_ : Tuple = AutoFeatureExtractor.from_pretrained(args.model_id ) UpperCAmelCase_ : Any = feature_extractor.sampling_rate # resample audio UpperCAmelCase_ : List[str] = dataset.cast_column('''audio''', Audio(sampling_rate=SCREAMING_SNAKE_CASE__ ) ) # load eval pipeline if args.device is None: UpperCAmelCase_ : int = 0 if torch.cuda.is_available() else -1 UpperCAmelCase_ : List[Any] = pipeline('''automatic-speech-recognition''', model=args.model_id, device=args.device ) # map function to decode audio def map_to_pred(SCREAMING_SNAKE_CASE__ : List[Any] ): UpperCAmelCase_ : List[str] = asr( batch['''audio''']['''array'''], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s ) UpperCAmelCase_ : Optional[Any] = prediction['''text'''] UpperCAmelCase_ : Tuple = normalize_text(batch['''sentence'''] ) return batch # run inference on all examples UpperCAmelCase_ : List[str] = dataset.map(SCREAMING_SNAKE_CASE__, remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": snake_case_ : Optional[Any] = argparse.ArgumentParser() parser.add_argument( "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers" ) parser.add_argument( "--dataset", type=str, required=True, help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets", ) parser.add_argument( "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice" ) parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`") parser.add_argument( "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds." ) parser.add_argument( "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second." ) parser.add_argument( "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis." ) parser.add_argument( "--device", type=int, default=None, help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.", ) snake_case_ : Any = parser.parse_args() main(args)
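A standalone check of the normalization rule used above, i.e. strip the ignored characters, lowercase, then collapse the listed whitespace sequences (the sample sentence is arbitrary):

import re

chars_to_ignore_regex = r'[,?.!\-\;\:"“%‘”�—’…–]'
text = "Hello, WORLD!\n\nTwo  spaces."
text = re.sub(chars_to_ignore_regex, "", text.lower())
for sep in ["\n\n", "\n", "  "]:
    text = " ".join(text.split(sep))
print(text)  # -> hello world two spaces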
644
'''simple docstring''' import sys import turtle def get_mid( pa: tuple[float, float], pb: tuple[float, float] ) -> tuple[float, float]: return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2 def triangle( vertex1: tuple[float, float], vertex2: tuple[float, float], vertex3: tuple[float, float], depth: int, ) -> None: my_pen.up() my_pen.goto(vertex1[0], vertex1[1] ) my_pen.down() my_pen.goto(vertex2[0], vertex2[1] ) my_pen.goto(vertex3[0], vertex3[1] ) my_pen.goto(vertex1[0], vertex1[1] ) if depth == 0: return triangle(vertex1, get_mid(vertex1, vertex2 ), get_mid(vertex1, vertex3 ), depth - 1 ) triangle(vertex2, get_mid(vertex2, vertex1 ), get_mid(vertex2, vertex3 ), depth - 1 ) triangle(vertex3, get_mid(vertex3, vertex1 ), get_mid(vertex3, vertex2 ), depth - 1 ) if __name__ == "__main__": if len(sys.argv) != 2: raise ValueError( "Correct format for using this script: " "python fractals.py <int:depth_for_fractal>" ) my_pen = turtle.Turtle() my_pen.ht() my_pen.speed(5) my_pen.pencolor("red") vertices = [(-1_75, -1_25), (0, 1_75), (1_75, -1_25)] # vertices of triangle triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
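Depth d draws 3**d smallest triangles. The midpoint helper can be checked without opening a turtle window (assuming the file is saved as fractals.py, per the usage message above; importing does not run the __main__ block):

from fractals import get_mid

assert get_mid((0.0, 0.0), (10.0, 4.0)) == (5.0, 2.0)
print("triangles at depth 5:", 3 ** 5)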
644
1
from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class lowercase_ : def __init__( self : Tuple , snake_case__ : int , snake_case__ : Union[str, Any]=13 , snake_case__ : Union[str, Any]=30 , snake_case__ : str=2 , snake_case__ : int=3 , snake_case__ : List[str]=True , snake_case__ : List[Any]=True , snake_case__ : Any=32 , snake_case__ : Optional[int]=2 , snake_case__ : Dict=4 , snake_case__ : Union[str, Any]=37 , snake_case__ : Optional[Any]="gelu" , snake_case__ : str=0.1 , snake_case__ : int=0.1 , snake_case__ : Union[str, Any]=10 , snake_case__ : int=0.02 , snake_case__ : Dict=3 , snake_case__ : List[Any]=None , snake_case__ : Dict=2 , ): """simple docstring""" SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = image_size SCREAMING_SNAKE_CASE_ = patch_size SCREAMING_SNAKE_CASE_ = num_channels SCREAMING_SNAKE_CASE_ = is_training SCREAMING_SNAKE_CASE_ = use_labels SCREAMING_SNAKE_CASE_ = hidden_size SCREAMING_SNAKE_CASE_ = num_hidden_layers SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = intermediate_size SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = hidden_dropout_prob SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ = type_sequence_label_size SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = scope SCREAMING_SNAKE_CASE_ = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) SCREAMING_SNAKE_CASE_ = (image_size // patch_size) ** 2 SCREAMING_SNAKE_CASE_ = num_patches + 2 def __a ( self : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE_ = None if self.use_labels: SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE_ = self.get_config() return config, pixel_values, labels def __a ( self : List[Any] ): """simple docstring""" return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __a ( self : List[Any] , snake_case__ : str , snake_case__ : int , snake_case__ : Dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ = TFDeiTModel(config=snake_case__ ) SCREAMING_SNAKE_CASE_ = model(snake_case__ ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self : List[Any] , snake_case__ : Optional[int] , snake_case__ : Tuple , snake_case__ : List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = TFDeiTForMaskedImageModeling(config=snake_case__ ) SCREAMING_SNAKE_CASE_ = model(snake_case__ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = TFDeiTForMaskedImageModeling(snake_case__ ) SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE_ = model(snake_case__ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __a ( self : Union[str, Any] , snake_case__ : int , snake_case__ : Any , snake_case__ : str ): """simple docstring""" SCREAMING_SNAKE_CASE_ = self.type_sequence_label_size SCREAMING_SNAKE_CASE_ = TFDeiTForImageClassification(snake_case__ ) SCREAMING_SNAKE_CASE_ = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = TFDeiTForImageClassification(snake_case__ ) SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE_ = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __a ( self : Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs SCREAMING_SNAKE_CASE_ = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class lowercase_ (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): lowerCAmelCase__ =( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) lowerCAmelCase__ =( { "feature-extraction": TFDeiTModel, "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) lowerCAmelCase__ =False lowerCAmelCase__ =False lowerCAmelCase__ =False lowerCAmelCase__ =False def __a ( self : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ = TFDeiTModelTester(self ) SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 ) def __a ( self : Tuple ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='DeiT does not use inputs_embeds' ) def __a ( self : Dict ): """simple docstring""" pass def __a ( self : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ = model_class(snake_case__ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) SCREAMING_SNAKE_CASE_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case__ , tf.keras.layers.Dense ) ) def __a ( self : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in 
self.all_model_classes: SCREAMING_SNAKE_CASE_ = model_class(snake_case__ ) SCREAMING_SNAKE_CASE_ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE_ = ['pixel_values'] self.assertListEqual(arg_names[:1] , snake_case__ ) def __a ( self : str ): """simple docstring""" SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def __a ( self : str ): """simple docstring""" SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*snake_case__ ) def __a ( self : Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) def __a ( self : str , snake_case__ : str , snake_case__ : Any , snake_case__ : Tuple=False ): """simple docstring""" SCREAMING_SNAKE_CASE_ = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def __a ( self : List[str] ): """simple docstring""" for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_ = TFDeiTModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def _a ( )-> int: SCREAMING_SNAKE_CASE_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class lowercase_ (unittest.TestCase ): @cached_property def __a ( self : List[Any] ): """simple docstring""" return ( DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' ) if is_vision_available() else None ) @slow def __a ( self : str ): """simple docstring""" SCREAMING_SNAKE_CASE_ = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ) SCREAMING_SNAKE_CASE_ = self.default_image_processor SCREAMING_SNAKE_CASE_ = prepare_img() SCREAMING_SNAKE_CASE_ = image_processor(images=snake_case__ , return_tensors='tf' ) # forward pass SCREAMING_SNAKE_CASE_ = model(**snake_case__ ) # verify the logits SCREAMING_SNAKE_CASE_ = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , snake_case__ ) SCREAMING_SNAKE_CASE_ = tf.constant([-1.02_66, 0.19_12, -1.28_61] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , snake_case__ , atol=1e-4 ) )
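A standalone sketch of the distilled checkpoint the integration test above loads (cat.png is a hypothetical local image):

import tensorflow as tf
from PIL import Image
from transformers import AutoImageProcessor, TFDeiTForImageClassificationWithTeacher

processor = AutoImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

image = Image.open("cat.png")
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits            # shape (1, 1000)
print(int(tf.argmax(logits, axis=-1)[0]))  # predicted ImageNet class index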
360
from math import sqrt import numpy as np from sympy import symbols # Coefficient # Speed of light (m/s) c = 2_9_9_7_9_2_4_5_8 # Symbols ct, x, y, z = symbols('''ct x y z''') def beta( velocity: float )-> float: if velocity > c: raise ValueError('Speed must not exceed light speed 299,792,458 [m/s]!' ) elif velocity < 1: # Usually the speed should be much higher than 1 (c order of magnitude) raise ValueError('Speed must be greater than or equal to 1!' ) return velocity / c def gamma( velocity: float )-> float: return 1 / sqrt(1 - beta(velocity ) ** 2 ) def transformation_matrix( velocity: float )-> np.ndarray: return np.array( [ [gamma(velocity ), -gamma(velocity ) * beta(velocity ), 0, 0], [-gamma(velocity ) * beta(velocity ), gamma(velocity ), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], ] ) def transform( velocity: float , event: np.ndarray = None )-> np.ndarray: # Ensure event is not empty if event is None: event = np.array([ct, x, y, z] ) # Symbolic four vector else: event[0] *= c # x0 is ct (speed of light * time) return transformation_matrix(velocity ) @ event if __name__ == "__main__": import doctest doctest.testmod() # Example of symbolic vector: four_vector = transform(2_9_9_7_9_2_4_5) print('''Example of four vector: ''') print(f"""ct' = {four_vector[0]}""") print(f"""x' = {four_vector[1]}""") print(f"""y' = {four_vector[2]}""") print(f"""z' = {four_vector[3]}""") # Substitute symbols with numerical values sub_dict = {ct: c, x: 1, y: 1, z: 1} numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)] print(f"""\n{numerical_vector}""")
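As a numeric illustration of the factors above, at half light speed beta is 0.5 and gamma = 1/sqrt(1 - 0.25) ≈ 1.1547 (uses the functions defined above):

v = 299_792_458 / 2
print(beta(v))   # 0.5
print(gamma(v))  # 1.1547005383792517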
360
1
from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _UpperCAmelCase : def __init__( self : int , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any]=3 , UpperCAmelCase : Dict=32 , UpperCAmelCase : Tuple=3 , UpperCAmelCase : List[str]=10 , UpperCAmelCase : Optional[int]=[10, 20, 30, 40] , UpperCAmelCase : List[str]=[1, 1, 2, 1] , UpperCAmelCase : int=True , UpperCAmelCase : Dict=True , UpperCAmelCase : Optional[int]="relu" , UpperCAmelCase : Tuple=3 , UpperCAmelCase : Tuple=None , ): SCREAMING_SNAKE_CASE_ :Optional[int] = parent SCREAMING_SNAKE_CASE_ :List[str] = batch_size SCREAMING_SNAKE_CASE_ :Optional[Any] = image_size SCREAMING_SNAKE_CASE_ :str = num_channels SCREAMING_SNAKE_CASE_ :List[str] = embeddings_size SCREAMING_SNAKE_CASE_ :Optional[Any] = hidden_sizes SCREAMING_SNAKE_CASE_ :str = depths SCREAMING_SNAKE_CASE_ :str = is_training SCREAMING_SNAKE_CASE_ :Optional[int] = use_labels SCREAMING_SNAKE_CASE_ :List[Any] = hidden_act SCREAMING_SNAKE_CASE_ :List[Any] = num_labels SCREAMING_SNAKE_CASE_ :List[str] = scope SCREAMING_SNAKE_CASE_ :str = len(UpperCAmelCase) def _snake_case ( self : Optional[int]): SCREAMING_SNAKE_CASE_ :List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) SCREAMING_SNAKE_CASE_ :str = None if self.use_labels: SCREAMING_SNAKE_CASE_ :List[Any] = ids_tensor([self.batch_size] , self.num_labels) SCREAMING_SNAKE_CASE_ :int = self.get_config() return config, pixel_values, labels def _snake_case ( self : Optional[Any]): return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def _snake_case ( self : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : int): SCREAMING_SNAKE_CASE_ :Dict = TFRegNetModel(config=UpperCAmelCase) SCREAMING_SNAKE_CASE_ :List[str] = model(UpperCAmelCase , training=UpperCAmelCase) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _snake_case ( self : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : Dict): SCREAMING_SNAKE_CASE_ :int = self.num_labels SCREAMING_SNAKE_CASE_ :Tuple = TFRegNetForImageClassification(UpperCAmelCase) SCREAMING_SNAKE_CASE_ :str = model(UpperCAmelCase , labels=UpperCAmelCase , training=UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def _snake_case ( self : Optional[Any]): SCREAMING_SNAKE_CASE_ :List[Any] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :List[str] = config_and_inputs SCREAMING_SNAKE_CASE_ :Optional[Any] = 
{"pixel_values": pixel_values} return config, inputs_dict @require_tf class _UpperCAmelCase ( lowercase , lowercase , unittest.TestCase ): lowerCamelCase_ : Any = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () lowerCamelCase_ : List[Any] = ( {"""feature-extraction""": TFRegNetModel, """image-classification""": TFRegNetForImageClassification} if is_tf_available() else {} ) lowerCamelCase_ : int = False lowerCamelCase_ : List[Any] = False lowerCamelCase_ : List[str] = False lowerCamelCase_ : Dict = False lowerCamelCase_ : Any = False def _snake_case ( self : List[str]): SCREAMING_SNAKE_CASE_ :Dict = TFRegNetModelTester(self) SCREAMING_SNAKE_CASE_ :Optional[Any] = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase) def _snake_case ( self : List[str]): return @unittest.skip(reason="RegNet does not use inputs_embeds") def _snake_case ( self : Tuple): pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , ) @slow def _snake_case ( self : Any): super().test_keras_fit() @unittest.skip(reason="RegNet does not support input and output embeddings") def _snake_case ( self : Any): pass def _snake_case ( self : Any): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ :Any = model_class(UpperCAmelCase) SCREAMING_SNAKE_CASE_ :List[str] = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE_ :Any = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE_ :List[str] = ["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCAmelCase) def _snake_case ( self : Tuple): SCREAMING_SNAKE_CASE_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase) def _snake_case ( self : Optional[int]): def check_hidden_states_output(UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any]): SCREAMING_SNAKE_CASE_ :Any = model_class(UpperCAmelCase) SCREAMING_SNAKE_CASE_ :int = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase) , training=UpperCAmelCase) SCREAMING_SNAKE_CASE_ :Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states SCREAMING_SNAKE_CASE_ :str = self.model_tester.num_stages self.assertEqual(len(UpperCAmelCase) , expected_num_stages + 1) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ :Union[str, Any] = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: SCREAMING_SNAKE_CASE_ :str = layer_type SCREAMING_SNAKE_CASE_ :Dict = True check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE_ :Optional[Any] = True check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase) def _snake_case ( self : List[str]): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :Union[str, Any] = 
self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[str]={}): SCREAMING_SNAKE_CASE_ :List[Any] = model(UpperCAmelCase , return_dict=UpperCAmelCase , **UpperCAmelCase) SCREAMING_SNAKE_CASE_ :Union[str, Any] = model(UpperCAmelCase , return_dict=UpperCAmelCase , **UpperCAmelCase).to_tuple() def recursive_check(UpperCAmelCase : str , UpperCAmelCase : Tuple): if isinstance(UpperCAmelCase , (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(UpperCAmelCase , UpperCAmelCase): recursive_check(UpperCAmelCase , UpperCAmelCase) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(UpperCAmelCase , UpperCAmelCase)) , msg=( "Tuple and dict output are not equal. Difference:" F" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}" ) , ) recursive_check(UpperCAmelCase , UpperCAmelCase) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ :List[str] = model_class(UpperCAmelCase) SCREAMING_SNAKE_CASE_ :Union[str, Any] = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase) SCREAMING_SNAKE_CASE_ :List[str] = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase) check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase) SCREAMING_SNAKE_CASE_ :Optional[Any] = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase) SCREAMING_SNAKE_CASE_ :Any = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase) check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase) SCREAMING_SNAKE_CASE_ :List[str] = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase) SCREAMING_SNAKE_CASE_ :Optional[Any] = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase) check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , {"output_hidden_states": True}) SCREAMING_SNAKE_CASE_ :Any = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase) SCREAMING_SNAKE_CASE_ :int = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase) check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , {"output_hidden_states": True}) def _snake_case ( self : Tuple): SCREAMING_SNAKE_CASE_ :List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase) @slow def _snake_case ( self : Optional[int]): for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_ :Any = TFRegNetModel.from_pretrained(UpperCAmelCase) self.assertIsNotNone(UpperCAmelCase) def lowercase ( ): '''simple docstring''' SCREAMING_SNAKE_CASE_ :Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class _UpperCAmelCase ( unittest.TestCase ): @cached_property def _snake_case ( self : Optional[int]): return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None ) @slow def _snake_case ( self : Any): SCREAMING_SNAKE_CASE_ :List[Any] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]) SCREAMING_SNAKE_CASE_ :Tuple = self.default_image_processor SCREAMING_SNAKE_CASE_ :List[Any] = prepare_img() SCREAMING_SNAKE_CASE_ :List[Any] = image_processor(images=UpperCAmelCase , return_tensors="tf") # forward pass SCREAMING_SNAKE_CASE_ :Optional[int] = model(**UpperCAmelCase , 
training=UpperCAmelCase) # verify the logits SCREAMING_SNAKE_CASE_ :Dict = tf.TensorShape((1, 10_00)) self.assertEqual(outputs.logits.shape , UpperCAmelCase) SCREAMING_SNAKE_CASE_ :Dict = tf.constant([-0.4180, -1.5051, -3.4836]) tf.debugging.assert_near(outputs.logits[0, :3] , UpperCAmelCase , atol=1E-4)
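# Standalone sketch of the inference path the integration test above exercises
# (my addition, not part of the test file). The checkpoint name
# "facebook/regnet-y-040" is an assumption: the test itself reads the name from
# TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] instead.
import tensorflow as tf
from PIL import Image
from transformers import AutoImageProcessor, TFRegNetForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs, training=False).logits  # shape (1, 1000)
print(int(tf.math.argmax(logits, axis=-1)[0]))  # predicted ImageNet class id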
140
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter

# Second-order (biquad) filter designs; each function returns an IIRFilter
# configured with the computed coefficients.


def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a band-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates an all-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> IIRFilter:
    """Creates a peak filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a

    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> IIRFilter:
    """Creates a low-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)

    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> IIRFilter:
    """Creates a high-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)

    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
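# Usage sketch (my addition, not part of the upstream module): build a 1 kHz
# low-pass biquad at 48 kHz and run a 5 kHz sine through it. Assumes the
# companion audio_filters.iir_filter.IIRFilter exposes process(sample) -> float,
# which filters one sample at a time.
from math import sin, tau

samplerate = 48000
filt = make_lowpass(1000, samplerate)
signal = [sin(tau * 5000 * t / samplerate) for t in range(256)]
filtered = [filt.process(sample) for sample in signal]
print(max(abs(s) for s in filtered))  # well below the unit input amplitude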
140
1
"""simple docstring""" import heapq def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict ): lowerCAmelCase = [] # for each node and his adjacency list add them and the rank of the node to queue # using heapq module the queue will be filled like a Priority Queue # heapq works with a min priority queue, so I used -1*len(v) to build it for key, value in graph.items(): # O(log(n)) heapq.heappush(_UpperCAmelCase , [-1 * len(_UpperCAmelCase ), (key, value)] ) # chosen_vertices = set of chosen vertices lowerCAmelCase = set() # while queue isn't empty and there are still edges # (queue[0][0] is the rank of the node with max rank) while queue and queue[0][0] != 0: # extract vertex with max rank from queue and add it to chosen_vertices lowerCAmelCase = heapq.heappop(_UpperCAmelCase )[1][0] chosen_vertices.add(_UpperCAmelCase ) # Remove all arcs adjacent to argmax for elem in queue: # if v haven't adjacent node, skip if elem[0] == 0: continue # if argmax is reachable from elem # remove argmax from elem's adjacent list and update his rank if argmax in elem[1][1]: lowerCAmelCase = elem[1][1].index(_UpperCAmelCase ) del elem[1][1][index] elem[0] += 1 # re-order the queue heapq.heapify(_UpperCAmelCase ) return chosen_vertices if __name__ == "__main__": import doctest doctest.testmod() __UpperCamelCase : Tuple = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} print(f'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
4
import copy
import tempfile
import unittest

from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError

from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test


class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value


@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
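# Outside the test harness, the save/load round-trip these tests cover looks
# roughly like this sketch (my addition; the directory name is arbitrary).
from transformers import GenerationConfig

config = GenerationConfig(do_sample=True, temperature=0.7)
config.save_pretrained("./my-gen-config")

# Keyword overrides at load time win over the serialized values.
loaded = GenerationConfig.from_pretrained("./my-gen-config", temperature=1.0)
assert loaded.temperature == 1.0 and loaded.do_sample is True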
173
0
"""simple docstring""" from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor from .base import PipelineTool class lowerCAmelCase ( snake_case__ ): '''simple docstring''' A = 'openai/whisper-base' A = ( 'This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the ' 'transcribed text.' ) A = 'transcriber' A = WhisperProcessor A = WhisperForConditionalGeneration A = ['audio'] A = ['text'] def lowerCamelCase__ ( self :Optional[int] , lowerCamelCase_ :int ) -> Dict: """simple docstring""" return self.pre_processor(lowerCamelCase_ , return_tensors="pt" ).input_features def lowerCamelCase__ ( self :List[Any] , lowerCamelCase_ :Union[str, Any] ) -> Optional[int]: """simple docstring""" return self.model.generate(inputs=lowerCamelCase_ ) def lowerCamelCase__ ( self :List[str] , lowerCamelCase_ :int ) -> List[Any]: """simple docstring""" return self.pre_processor.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )[0]
715
"""simple docstring""" import argparse import hashlib # hashlib is only used inside the Test class import struct class lowerCAmelCase : '''simple docstring''' def __init__( self :List[str] , lowerCamelCase_ :List[Any] ) -> Dict: """simple docstring""" UpperCamelCase__ = data UpperCamelCase__ = [0X6745_2301, 0Xefcd_ab89, 0X98ba_dcfe, 0X1032_5476, 0Xc3d2_e1f0] @staticmethod def lowerCamelCase__ ( lowerCamelCase_ :Dict , lowerCamelCase_ :Tuple ) -> int: """simple docstring""" return ((n << b) | (n >> (3_2 - b))) & 0Xffff_ffff def lowerCamelCase__ ( self :List[str] ) -> str: """simple docstring""" UpperCamelCase__ = b"\x80" + b"\x00" * (6_3 - (len(self.data ) + 8) % 6_4) UpperCamelCase__ = self.data + padding + struct.pack(">Q" , 8 * len(self.data ) ) return padded_data def lowerCamelCase__ ( self :Optional[Any] ) -> Any: """simple docstring""" return [ self.padded_data[i : i + 6_4] for i in range(0 , len(self.padded_data ) , 6_4 ) ] def lowerCamelCase__ ( self :Optional[int] , lowerCamelCase_ :Dict ) -> Any: """simple docstring""" UpperCamelCase__ = list(struct.unpack(">16L" , lowerCamelCase_ ) ) + [0] * 6_4 for i in range(1_6 , 8_0 ): UpperCamelCase__ = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 1_4] ^ w[i - 1_6]) , 1 ) return w def lowerCamelCase__ ( self :Union[str, Any] ) -> List[str]: """simple docstring""" UpperCamelCase__ = self.padding() UpperCamelCase__ = self.split_blocks() for block in self.blocks: UpperCamelCase__ = self.expand_block(lowerCamelCase_ ) UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.h for i in range(0 , 8_0 ): if 0 <= i < 2_0: UpperCamelCase__ = (b & c) | ((~b) & d) UpperCamelCase__ = 0X5a82_7999 elif 2_0 <= i < 4_0: UpperCamelCase__ = b ^ c ^ d UpperCamelCase__ = 0X6ed9_eba1 elif 4_0 <= i < 6_0: UpperCamelCase__ = (b & c) | (b & d) | (c & d) UpperCamelCase__ = 0X8f1b_bcdc elif 6_0 <= i < 8_0: UpperCamelCase__ = b ^ c ^ d UpperCamelCase__ = 0Xca62_c1d6 UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = ( self.rotate(lowerCamelCase_ , 5 ) + f + e + k + expanded_block[i] & 0Xffff_ffff, a, self.rotate(lowerCamelCase_ , 3_0 ), c, d, ) UpperCamelCase__ = ( self.h[0] + a & 0Xffff_ffff, self.h[1] + b & 0Xffff_ffff, self.h[2] + c & 0Xffff_ffff, self.h[3] + d & 0Xffff_ffff, self.h[4] + e & 0Xffff_ffff, ) return ("{:08x}" * 5).format(*self.h ) def snake_case__ ( ): """simple docstring""" UpperCamelCase__ = b"Test String" assert SHAaHash(_snake_case ).final_hash() == hashlib.shaa(_snake_case ).hexdigest() # noqa: S324 def snake_case__ ( ): """simple docstring""" UpperCamelCase__ = argparse.ArgumentParser(description="Process some strings or files" ) parser.add_argument( "--string" , dest="input_string" , default="Hello World!! Welcome to Cryptography" , help="Hash the string" , ) parser.add_argument("--file" , dest="input_file" , help="Hash contents of a file" ) UpperCamelCase__ = parser.parse_args() UpperCamelCase__ = args.input_string # In any case hash input should be a bytestring if args.input_file: with open(args.input_file , "rb" ) as f: UpperCamelCase__ = f.read() else: UpperCamelCase__ = bytes(_snake_case , "utf-8" ) print(SHAaHash(_snake_case ).final_hash() ) if __name__ == "__main__": main() import doctest doctest.testmod()
304
0