Dataset schema:

    code                      string   length 86 – 54.5k
    code_codestyle            int64    0 – 371
    style_context             string   length 87 – 49.2k
    style_context_codestyle   int64    0 – 349
    label                     int64    0 – 1

Sample rows:
"""simple docstring""" from __future__ import annotations def A ( snake_case :float , snake_case :float , snake_case :float ) -> dict[str, float]: if (voltage, current, resistance).count(0 ) != 1: raise ValueError('One and only one argument must be 0' ) if resistance < 0: raise ValueError('Resistance cannot be negative' ) if voltage == 0: return {"voltage": float(current * resistance )} elif current == 0: return {"current": voltage / resistance} elif resistance == 0: return {"resistance": voltage / current} else: raise ValueError('Exactly one argument must be 0' ) if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 316
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertConfig, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertWithLMHeadModel, ) class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = parent __UpperCamelCase = 13 __UpperCamelCase = 7 __UpperCamelCase = True __UpperCamelCase = True __UpperCamelCase = True __UpperCamelCase = True __UpperCamelCase = True __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = 2 __UpperCamelCase = 99 __UpperCamelCase = 0 __UpperCamelCase = 32 __UpperCamelCase = 2 __UpperCamelCase = 4 __UpperCamelCase = 0.1 __UpperCamelCase = 0.1 __UpperCamelCase = 512 __UpperCamelCase = 16 __UpperCamelCase = 2 __UpperCamelCase = 0.0_2 __UpperCamelCase = 3 __UpperCamelCase = 4 __UpperCamelCase = 'last' __UpperCamelCase = True __UpperCamelCase = None __UpperCamelCase = 0 def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa ) __UpperCamelCase = None if self.use_input_lengths: __UpperCamelCase = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length __UpperCamelCase = None if self.use_token_type_ids: __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None if self.use_labels: __UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCamelCase = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa ) __UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __UpperCamelCase = FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , ) return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = TFFlaubertModel(config=__UpperCAmelCase ) __UpperCamelCase = {'input_ids': input_ids, 'lengths': 
input_lengths, 'langs': token_type_ids} __UpperCamelCase = model(__UpperCAmelCase ) __UpperCamelCase = [input_ids, input_mask] __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = TFFlaubertWithLMHeadModel(__UpperCAmelCase ) __UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids} __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = TFFlaubertForQuestionAnsweringSimple(__UpperCAmelCase ) __UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths} __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = TFFlaubertForSequenceClassification(__UpperCAmelCase ) __UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths} __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = self.num_labels __UpperCamelCase = TFFlaubertForTokenClassification(config=__UpperCAmelCase ) __UpperCamelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = self.num_choices __UpperCamelCase = TFFlaubertForMultipleChoice(config=__UpperCAmelCase ) __UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCamelCase = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids, } __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.prepare_config_and_inputs() ( ( __UpperCamelCase ) , ( __UpperCamelCase ) 
, ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ) = config_and_inputs __UpperCamelCase = { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'langs': token_type_ids, 'lengths': input_lengths, } return config, inputs_dict @require_tf class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): lowercase = ( ( TFFlaubertModel, TFFlaubertWithLMHeadModel, TFFlaubertForSequenceClassification, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForTokenClassification, TFFlaubertForMultipleChoice, ) if is_tf_available() else () ) lowercase = ( (TFFlaubertWithLMHeadModel,) if is_tf_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable lowercase = ( { "feature-extraction": TFFlaubertModel, "fill-mask": TFFlaubertWithLMHeadModel, "question-answering": TFFlaubertForQuestionAnsweringSimple, "text-classification": TFFlaubertForSequenceClassification, "token-classification": TFFlaubertForTokenClassification, "zero-shot": TFFlaubertForSequenceClassification, } if is_tf_available() else {} ) lowercase = False lowercase = False def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = TFFlaubertModelTester(self ) __UpperCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , emb_dim=37 ) def UpperCAmelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_token_classification(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_multiple_choice(*__UpperCAmelCase ) @slow def UpperCAmelCase ( self ): '''simple docstring''' for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCamelCase = TFFlaubertModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @require_tf @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( unittest.TestCase ): 
@slow def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased' ) __UpperCamelCase = tf.convert_to_tensor( [[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !" __UpperCamelCase = model(__UpperCAmelCase )[0] __UpperCamelCase = tf.TensorShape((1, 8, 512) ) self.assertEqual(output.shape , __UpperCAmelCase ) # compare the actual values for a slice. __UpperCamelCase = tf.convert_to_tensor( [ [ [-1.8_7_6_8_7_7_3, -1.5_6_6_5_5_5, 0.2_7_0_7_2_4_1_8], [-1.6_9_2_0_0_3_8, -0.5_8_7_3_5_0_5, 1.9_3_2_9_5_9_9], [-2.9_5_6_3_9_8_5, -1.6_9_9_3_8_3_5, 1.7_9_7_2_0_5_2], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
style_context_codestyle: 316
label: 1
[Row 2] code:

import unittest

import numpy as np

from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_pytesseract_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_layoutlmv3_integration_test(self):
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]]  # noqa: E231
        expected_boxes = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]]  # noqa: E231
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
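For context, a minimal usage sketch of the image processor exercised above (assumes Tesseract/pytesseract are installed; the local file name is hypothetical):

    from PIL import Image
    from transformers import LayoutLMv3ImageProcessor

    processor = LayoutLMv3ImageProcessor(apply_ocr=True)
    image = Image.open("document.png").convert("RGB")  # hypothetical input scan
    encoding = processor(image, return_tensors="pt")
    # pixel_values is resized to 224x224 by default; words/boxes come from OCR
    print(encoding.pixel_values.shape, len(encoding.words[0]), len(encoding.boxes[0]))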
code_codestyle: 352
[Row 2] style_context:

import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
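A rough usage sketch (hedged: the checkpoint name is the commonly published Chinese-CLIP one, not taken from this row, and the image file is hypothetical):

    from PIL import Image
    from transformers import ChineseCLIPProcessor

    processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
    inputs = processor(text=["一只猫"], images=Image.open("cat.jpg"), return_tensors="pt")
    # tokenizer fields (input_ids, ...) plus pixel_values merged into one BatchEncoding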
style_context_codestyle: 232
label: 0
[Row 3] code:

import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation


def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config


def create_rename_keys(config):
    rename_keys = []

    # fmt: off
    # stem
    rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight"))
    rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias"))
    rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight"))
    rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.{j}.gamma", f"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.weight", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.bias", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.weight", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.bias", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias"))
        if i > 0:
            rename_keys.append((f"backbone.downsample_layers.{i}.0.weight", f"backbone.encoder.stages.{i}.downsampling_layer.0.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.0.bias", f"backbone.encoder.stages.{i}.downsampling_layer.0.bias"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.weight", f"backbone.encoder.stages.{i}.downsampling_layer.1.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.bias", f"backbone.encoder.stages.{i}.downsampling_layer.1.bias"))

        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))

    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on

    return rename_keys


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)

    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]]
        )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]]
        )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]]
        )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-convnext-tiny",
        type=str,
        choices=[f"upernet-convnext-{size}" for size in ["tiny", "small", "base", "large", "xlarge"]],
        help="Name of the ConvNext UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
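For reference, the conversion entry point can also be driven directly from Python instead of the CLI (a sketch; argument names follow the function as reconstructed above):

    # downloads the mmsegmentation checkpoint, converts it, and saves locally
    convert_upernet_checkpoint(
        model_name="upernet-convnext-tiny",
        pytorch_dump_folder_path="./upernet-convnext-tiny",
        push_to_hub=False,
    )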
code_codestyle: 169
[Row 3] style_context:

import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
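A usage sketch (assumes nltk and its punkt data are available, per the download above):

    text = "Gasoline prices fell. <n>Analysts expect a rebound."
    print(add_newline_to_end_of_each_sentence(text))
    # Gasoline prices fell.
    # Analysts expect a rebound.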
style_context_codestyle: 225
label: 0
[Row 4] code:

import random
import unittest

import numpy as np

from diffusers import (
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
    PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
code_codestyle: 191
[Row 4] style_context:

def solution(limit: int = 50_000_000) -> int:
    """Count the numbers below `limit` expressible as p**2 + q**3 + r**4
    with p, q, r prime."""
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    # sieve out composites
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)


if __name__ == "__main__":
    print(f"{solution() = }")
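A worked check on a small limit: below 50 the qualifying numbers are 28 = 2^2 + 2^3 + 2^4, 33 = 3^2 + 2^3 + 2^4, 47 = 2^2 + 3^3 + 2^4 and 49 = 5^2 + 2^3 + 2^4, so:

    assert solution(50) == 4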
style_context_codestyle: 191
label: 1
[Row 5] code:

import functools


def min_distance_up_bottom(word1: str, word2: str) -> int:
    """Edit (Levenshtein) distance between word1 and word2, computed
    top-down with memoization."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
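The classic example: turning "kitten" into "sitting" takes three edits (two substitutions and one insertion):

    assert min_distance_up_bottom("kitten", "sitting") == 3
    assert min_distance_up_bottom("", "abc") == 3  # empty source: three insertions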
code_codestyle: 199
[Row 5] style_context:

from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
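A consistency check against the standard library (a sketch; `md5_me` is the digest function as reconstructed above, which returns the hex digest as bytes):

    import hashlib

    message = b"The quick brown fox jumps over the lazy dog"
    assert md5_me(message) == hashlib.md5(message).hexdigest().encode("utf-8")
    assert md5_me(b"") == b"d41d8cd98f00b204e9800998ecf8427e"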
style_context_codestyle: 199
label: 1
[Row 6] code:

import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}


class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
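A short usage sketch (the YituTech checkpoint names come from the maps above):

    tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
    enc = tokenizer("Hello world", "second segment")
    # enc["input_ids"] encodes [CLS] seg1 [SEP] seg2 [SEP];
    # enc["token_type_ids"] marks the second segment with 1s,
    # per create_token_type_ids_from_sequences above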
245
def is_power_of_two(number: int) -> bool:
    """Return True if `number` is a power of two (note: 0 also satisfies this test)."""
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
245
1
import math
import random
from typing import Any

from .hill_climbing import SearchProblem


def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score

            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds

            if not find_max:
                change = change * -1  # in case we are finding minimum

            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )
214
import unittest

from transformers import DonutProcessor


DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }

        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence)

        self.assertDictEqual(actual_json, expected_json)
214
1
import argparse
import json
import os

import torch
from torch import nn

from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        # only rewrite dense fc layers, not the per-expert ones handled above
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict


def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--nllb_moe_checkpoint_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
245
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_plbart import PLBartTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_plbart import (
            PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            PLBartForCausalLM,
            PLBartForConditionalGeneration,
            PLBartForSequenceClassification,
            PLBartModel,
            PLBartPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
245
1
from math import acos, sin
from typing import List, Tuple, Union

import numpy as np
import torch
from PIL import Image

from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel


class AudioDiffusionPipeline(DiffusionPipeline):
    """Pipeline for audio diffusion with an optional VQ-VAE latent stage."""

    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ):
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(
                    input_images, noise, self.scheduler.timesteps[start_step - 1]
                )

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(
                input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:])
            )

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    eta=eta,
                    generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    generator=step_generator,
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))

    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50):
        # Only works with DDIM as this method is deterministic
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [
                np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width))
                for image in images
            ]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample

    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
271
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel

from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
271
1
"""simple docstring""" import copy from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__) class __lowerCamelCase ( __SCREAMING_SNAKE_CASE ): __UpperCamelCase = ['input_features'] def __init__(self , lowerCamelCase=80 , lowerCamelCase=16_000 , lowerCamelCase=160 , lowerCamelCase=30 , lowerCamelCase=400 , lowerCamelCase=0.0 , lowerCamelCase=False , **lowerCamelCase , ): '''simple docstring''' super().__init__( feature_size=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , padding_value=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , **UpperCamelCase__ , ) _lowerCAmelCase = n_fft _lowerCAmelCase = hop_length _lowerCAmelCase = chunk_length _lowerCAmelCase = chunk_length * sampling_rate _lowerCAmelCase = self.n_samples // hop_length _lowerCAmelCase = sampling_rate _lowerCAmelCase = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=UpperCamelCase__ , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=UpperCamelCase__ , norm="""slaney""" , mel_scale="""slaney""" , ) def A__ (self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = spectrogram( UpperCamelCase__ , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="""log10""" , ) _lowerCAmelCase = log_spec[:, :-1] _lowerCAmelCase = np.maximum(UpperCamelCase__ , log_spec.max() - 8.0 ) _lowerCAmelCase = (log_spec + 4.0) / 4.0 return log_spec @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def A__ (lowerCamelCase , lowerCamelCase , lowerCamelCase = 0.0 ): '''simple docstring''' if attention_mask is not None: _lowerCAmelCase = np.array(UpperCamelCase__ , np.intaa ) _lowerCAmelCase = [] for vector, length in zip(UpperCamelCase__ , attention_mask.sum(-1 ) ): _lowerCAmelCase = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 ) if length < normed_slice.shape[0]: _lowerCAmelCase = padding_value normed_input_values.append(UpperCamelCase__ ) else: _lowerCAmelCase = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values] return normed_input_values def __call__(self , lowerCamelCase , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = "max_length" , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , **lowerCamelCase , ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a""" f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input""" f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. 
""" """Failing to do so can result in silent errors that might be hard to debug.""" ) _lowerCAmelCase = isinstance(UpperCamelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) _lowerCAmelCase = is_batched_numpy or ( isinstance(UpperCamelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _lowerCAmelCase = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(UpperCamelCase__ , np.ndarray ): _lowerCAmelCase = np.asarray(UpperCamelCase__ , dtype=np.floataa ) elif isinstance(UpperCamelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): _lowerCAmelCase = raw_speech.astype(np.floataa ) # always return batch if not is_batched: _lowerCAmelCase = [np.asarray([raw_speech] ).T] _lowerCAmelCase = BatchFeature({"""input_features""": raw_speech} ) # convert into correct format for padding _lowerCAmelCase = self.pad( UpperCamelCase__ , padding=UpperCamelCase__ , max_length=max_length if max_length else self.n_samples , truncation=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_attention_mask=return_attention_mask or do_normalize , ) # zero-mean and unit-variance normalization if do_normalize: _lowerCAmelCase = self.zero_mean_unit_var_norm( padded_inputs["""input_features"""] , attention_mask=padded_inputs["""attention_mask"""] , padding_value=self.padding_value , ) _lowerCAmelCase = np.stack(padded_inputs["""input_features"""] , axis=0 ) # make sure list is in array format _lowerCAmelCase = padded_inputs.get("""input_features""" ).transpose(2 , 0 , 1 ) _lowerCAmelCase = [self._np_extract_fbank_features(UpperCamelCase__ ) for waveform in input_features[0]] if isinstance(input_features[0] , UpperCamelCase__ ): _lowerCAmelCase = [np.asarray(UpperCamelCase__ , dtype=np.floataa ) for feature in input_features] else: _lowerCAmelCase = input_features if return_attention_mask: # rescale from sample (48000) to feature (3000) _lowerCAmelCase = padded_inputs['''attention_mask'''][:, :: self.hop_length] if return_tensors is not None: _lowerCAmelCase = padded_inputs.convert_to_tensors(UpperCamelCase__ ) return padded_inputs def A__ (self ): '''simple docstring''' _lowerCAmelCase = copy.deepcopy(self.__dict__ ) _lowerCAmelCase = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] return output
367
"""simple docstring""" from __future__ import annotations import queue class __lowerCamelCase : def __init__(self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = data _lowerCAmelCase = None _lowerCAmelCase = None def __UpperCAmelCase ( ) -> TreeNode: """simple docstring""" print("""\n********Press N to stop entering at any point of time********\n""" ) _lowerCAmelCase = input("""Enter the value of the root node: """ ).strip().lower() _lowerCAmelCase = queue.Queue() _lowerCAmelCase = TreeNode(int(snake_case_ ) ) q.put(snake_case_ ) while not q.empty(): _lowerCAmelCase = q.get() _lowerCAmelCase = F"""Enter the left node of {node_found.data}: """ _lowerCAmelCase = input(snake_case_ ).strip().lower() or """n""" if check == "n": return tree_node _lowerCAmelCase = TreeNode(int(snake_case_ ) ) _lowerCAmelCase = left_node q.put(snake_case_ ) _lowerCAmelCase = F"""Enter the right node of {node_found.data}: """ _lowerCAmelCase = input(snake_case_ ).strip().lower() or """n""" if check == "n": return tree_node _lowerCAmelCase = TreeNode(int(snake_case_ ) ) _lowerCAmelCase = right_node q.put(snake_case_ ) raise def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return print(node.data , end=""",""" ) pre_order(node.left ) pre_order(node.right ) def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return in_order(node.left ) print(node.data , end=""",""" ) in_order(node.right ) def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end=""",""" ) def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return _lowerCAmelCase = queue.Queue() q.put(snake_case_ ) while not q.empty(): _lowerCAmelCase = q.get() print(node_dequeued.data , end=""",""" ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return _lowerCAmelCase = queue.Queue() q.put(snake_case_ ) while not q.empty(): _lowerCAmelCase = [] while not q.empty(): _lowerCAmelCase = q.get() print(node_dequeued.data , end=""",""" ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(snake_case_ ) def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return _lowerCAmelCase = [] _lowerCAmelCase = node while n or stack: while n: # start from root node, find its left child print(n.data , end=""",""" ) stack.append(snake_case_ ) _lowerCAmelCase = n.left # end of while means current node doesn't have left child _lowerCAmelCase = stack.pop() # start to traverse its right child _lowerCAmelCase = n.right def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return _lowerCAmelCase = [] _lowerCAmelCase = node while n or stack: while n: stack.append(snake_case_ ) _lowerCAmelCase = n.left _lowerCAmelCase = stack.pop() print(n.data , end=""",""" ) _lowerCAmelCase = n.right def __UpperCAmelCase ( 
snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return _lowerCAmelCase , _lowerCAmelCase = [], [] _lowerCAmelCase = node stacka.append(snake_case_ ) while stacka: # to find the reversed order of post order, store it in stack2 _lowerCAmelCase = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(snake_case_ ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end=""",""" ) def __UpperCAmelCase ( snake_case_ : str = "" , snake_case_ : int=50 , snake_case_ : Dict="*" ) -> str: """simple docstring""" if not s: return "\n" + width * char _lowerCAmelCase , _lowerCAmelCase = divmod(width - len(snake_case_ ) - 2 , 2 ) return F"""{left * char} {s} {(left + extra) * char}""" if __name__ == "__main__": import doctest doctest.testmod() print(prompt('''Binary Tree Traversals''')) SCREAMING_SNAKE_CASE : TreeNode = build_tree() print(prompt('''Pre Order Traversal''')) pre_order(node) print(prompt() + '''\n''') print(prompt('''In Order Traversal''')) in_order(node) print(prompt() + '''\n''') print(prompt('''Post Order Traversal''')) post_order(node) print(prompt() + '''\n''') print(prompt('''Level Order Traversal''')) level_order(node) print(prompt() + '''\n''') print(prompt('''Actual Level Order Traversal''')) level_order_actual(node) print('''*''' * 5_0 + '''\n''') print(prompt('''Pre Order Traversal - Iteration Version''')) pre_order_iter(node) print(prompt() + '''\n''') print(prompt('''In Order Traversal - Iteration Version''')) in_order_iter(node) print(prompt() + '''\n''') print(prompt('''Post Order Traversal - Iteration Version''')) post_order_iter(node) print(prompt())
317
0
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : Optional[int] = "philschmid/bart-large-cnn-samsum" snake_case__ : str = ( "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, " "and returns a summary of the text." ) snake_case__ : Union[str, Any] = "summarizer" snake_case__ : Optional[Any] = AutoTokenizer snake_case__ : Optional[int] = AutoModelForSeqaSeqLM snake_case__ : Any = ["text"] snake_case__ : Tuple = ["text"] def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : Optional[Any] ) -> int: return self.pre_processor(UpperCAmelCase__ , return_tensors="pt" , truncation=UpperCAmelCase__ ) def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : int ) -> Any: return self.model.generate(**UpperCAmelCase__ )[0] def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : List[Any] ) -> List[Any]: return self.pre_processor.decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ , clean_up_tokenization_spaces=UpperCAmelCase__ )
54
import tempfile
import unittest

import numpy as np

import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )
    from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel

if is_torch_available():
    import torch


class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )

    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)

    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
184
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm"] = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm"] = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]


if TYPE_CHECKING:
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
355
import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P


# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple((re.compile(x + "$") for x in qs))
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
98
0
import argparse
import json

import numpy
import torch

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
13
import unittest

import numpy as np

from transformers import is_flax_available
from transformers.testing_utils import require_flax

from ..test_modeling_flax_common import ids_tensor


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.generation import (
        FlaxForcedBOSTokenLogitsProcessor,
        FlaxForcedEOSTokenLogitsProcessor,
        FlaxLogitsProcessorList,
        FlaxMinLengthLogitsProcessor,
        FlaxTemperatureLogitsWarper,
        FlaxTopKLogitsWarper,
        FlaxTopPLogitsWarper,
    )


@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size: int, length: int):
        scores = jnp.ones((batch_size, length)) / length
        return scores

    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())

    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3)

        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)

        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])

    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])

    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())

    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())

    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
13
1
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __magic_name__ ( _UpperCamelCase , unittest.TestCase ): lowerCAmelCase : List[str] = RobertaTokenizer lowerCAmelCase : Tuple = RobertaTokenizerFast lowerCAmelCase : str = True lowerCAmelCase : Union[str, Any] = {'cls_token': '<s>'} def __lowercase ( self : Tuple ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _a : Union[str, Any] = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] _a : List[str] = dict(zip(_UpperCAmelCase ,range(len(_UpperCAmelCase ) ) ) ) _a : List[Any] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] _a : List[Any] = {'unk_token': '<unk>'} _a : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) _a : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp: fp.write(json.dumps(_UpperCAmelCase ) + '\n' ) with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp: fp.write('\n'.join(_UpperCAmelCase ) ) def __lowercase ( self : int ,**_UpperCAmelCase : Tuple ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname ,**_UpperCAmelCase ) def __lowercase ( self : Optional[Any] ,**_UpperCAmelCase : List[str] ): kwargs.update(self.special_tokens_map ) return RobertaTokenizerFast.from_pretrained(self.tmpdirname ,**_UpperCAmelCase ) def __lowercase ( self : Optional[int] ,_UpperCAmelCase : str ): _a : Union[str, Any] = 'lower newer' _a : str = 'lower newer' return input_text, output_text def __lowercase ( self : Optional[Any] ): _a : str = self.tokenizer_class(self.vocab_file ,self.merges_file ,**self.special_tokens_map ) _a : str = 'lower newer' _a : Tuple = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er'] _a : Tuple = tokenizer.tokenize(_UpperCAmelCase ) # , add_prefix_space=True) self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase ) _a : List[Any] = tokens + [tokenizer.unk_token] _a : List[str] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) ,_UpperCAmelCase ) def __lowercase ( self : List[Any] ): _a : List[str] = self.get_tokenizer() self.assertListEqual(tokenizer.encode('Hello world!' ,add_special_tokens=_UpperCAmelCase ) ,[0, 31414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode('Hello world! 
cécé herlolip 418' ,add_special_tokens=_UpperCAmelCase ) ,[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] ,) @slow def __lowercase ( self : int ): _a : Tuple = self.tokenizer_class.from_pretrained('roberta-base' ) _a : Any = tokenizer.encode('sequence builders' ,add_special_tokens=_UpperCAmelCase ) _a : Dict = tokenizer.encode('multi-sequence build' ,add_special_tokens=_UpperCAmelCase ) _a : Dict = tokenizer.encode( 'sequence builders' ,add_special_tokens=_UpperCAmelCase ,add_prefix_space=_UpperCAmelCase ) _a : str = tokenizer.encode( 'sequence builders' ,'multi-sequence build' ,add_special_tokens=_UpperCAmelCase ,add_prefix_space=_UpperCAmelCase ) _a : List[str] = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase ) _a : Optional[int] = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase ,_UpperCAmelCase ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def __lowercase ( self : List[str] ): _a : str = self.get_tokenizer() _a : List[Any] = 'Encode this sequence.' _a : Any = tokenizer.byte_encoder[' '.encode('utf-8' )[0]] # Testing encoder arguments _a : List[Any] = tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ,add_prefix_space=_UpperCAmelCase ) _a : int = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(_UpperCAmelCase ,_UpperCAmelCase ) _a : Optional[int] = tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ,add_prefix_space=_UpperCAmelCase ) _a : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(_UpperCAmelCase ,_UpperCAmelCase ) tokenizer.add_special_tokens({'bos_token': '<s>'} ) _a : Any = tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ) _a : str = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(_UpperCAmelCase ,_UpperCAmelCase ) # Testing spaces after special tokens _a : Optional[int] = '<mask>' tokenizer.add_special_tokens( {'mask_token': AddedToken(_UpperCAmelCase ,lstrip=_UpperCAmelCase ,rstrip=_UpperCAmelCase )} ) # mask token has a left space _a : Dict = tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) _a : List[str] = 'Encode <mask> sequence' _a : List[Any] = 'Encode <mask>sequence' _a : Tuple = tokenizer.encode(_UpperCAmelCase ) _a : List[Any] = encoded.index(_UpperCAmelCase ) _a : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(_UpperCAmelCase ,_UpperCAmelCase ) _a : Optional[Any] = tokenizer.encode(_UpperCAmelCase ) _a : str = encoded.index(_UpperCAmelCase ) _a : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(_UpperCAmelCase ,_UpperCAmelCase ) def __lowercase ( self : Dict ): pass def __lowercase ( self : Optional[Any] ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): _a : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase ,**_UpperCAmelCase ) _a : Dict = self.tokenizer_class.from_pretrained(_UpperCAmelCase ,**_UpperCAmelCase ) _a : int = 'A, <mask> AllenNLP sentence.' 
_a : Optional[int] = tokenizer_r.encode_plus(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ,return_token_type_ids=_UpperCAmelCase ) _a : Dict = tokenizer_p.encode_plus(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ,return_token_type_ids=_UpperCAmelCase ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['token_type_ids'] ) ,sum(tokens_p['token_type_ids'] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) ,sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) ,) _a : Tuple = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] ) _a : List[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['input_ids'] ,[0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r['input_ids'] ,[0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual( _UpperCAmelCase ,['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) self.assertSequenceEqual( _UpperCAmelCase ,['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) def __lowercase ( self : Tuple ): for trim_offsets, add_prefix_space in itertools.product([True, False] ,repeat=2 ): _a : Optional[int] = self.rust_tokenizer_class.from_pretrained( self.tmpdirname ,use_fast=_UpperCAmelCase ,add_prefix_space=_UpperCAmelCase ,trim_offsets=_UpperCAmelCase ) _a : int = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) _a : List[str] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['add_prefix_space'] ,_UpperCAmelCase ) self.assertEqual(post_processor_state['add_prefix_space'] ,_UpperCAmelCase ) self.assertEqual(post_processor_state['trim_offsets'] ,_UpperCAmelCase ) def __lowercase ( self : Union[str, Any] ): # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): _a : Union[str, Any] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name` _a : Union[str, Any] = F"""{text_of_1_token} {text_of_1_token}""" _a : Optional[Any] = self.rust_tokenizer_class.from_pretrained( _UpperCAmelCase ,use_fast=_UpperCAmelCase ,add_prefix_space=_UpperCAmelCase ,trim_offsets=_UpperCAmelCase ) _a : str = tokenizer_r(_UpperCAmelCase ,return_offsets_mapping=_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] ,(0, len(_UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] ,(len(_UpperCAmelCase ) + 1, len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) ,) _a : Tuple = self.rust_tokenizer_class.from_pretrained( _UpperCAmelCase ,use_fast=_UpperCAmelCase ,add_prefix_space=_UpperCAmelCase ,trim_offsets=_UpperCAmelCase ) _a : Any = tokenizer_r(_UpperCAmelCase ,return_offsets_mapping=_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] ,(0, len(_UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] ,(len(_UpperCAmelCase ) + 1, len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) ,) _a : str = self.rust_tokenizer_class.from_pretrained( _UpperCAmelCase ,use_fast=_UpperCAmelCase ,add_prefix_space=_UpperCAmelCase 
,trim_offsets=_UpperCAmelCase ) _a : List[Any] = tokenizer_r(_UpperCAmelCase ,return_offsets_mapping=_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] ,(0, len(_UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] ,(len(_UpperCAmelCase ), len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) ,) _a : Dict = self.rust_tokenizer_class.from_pretrained( _UpperCAmelCase ,use_fast=_UpperCAmelCase ,add_prefix_space=_UpperCAmelCase ,trim_offsets=_UpperCAmelCase ) _a : str = tokenizer_r(_UpperCAmelCase ,return_offsets_mapping=_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] ,(0, len(_UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] ,(len(_UpperCAmelCase ), len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) ,) _a : List[Any] = F""" {text}""" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) _a : Optional[int] = self.rust_tokenizer_class.from_pretrained( _UpperCAmelCase ,use_fast=_UpperCAmelCase ,add_prefix_space=_UpperCAmelCase ,trim_offsets=_UpperCAmelCase ) _a : Optional[Any] = tokenizer_r(_UpperCAmelCase ,return_offsets_mapping=_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(_UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] ,(1 + len(_UpperCAmelCase ) + 1, 1 + len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) ,) _a : Optional[int] = self.rust_tokenizer_class.from_pretrained( _UpperCAmelCase ,use_fast=_UpperCAmelCase ,add_prefix_space=_UpperCAmelCase ,trim_offsets=_UpperCAmelCase ) _a : List[str] = tokenizer_r(_UpperCAmelCase ,return_offsets_mapping=_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] ,(0, 1 + len(_UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] ,(1 + len(_UpperCAmelCase ), 1 + len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) ,) _a : int = self.rust_tokenizer_class.from_pretrained( _UpperCAmelCase ,use_fast=_UpperCAmelCase ,add_prefix_space=_UpperCAmelCase ,trim_offsets=_UpperCAmelCase ) _a : Any = tokenizer_r(_UpperCAmelCase ,return_offsets_mapping=_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] ,(0, 1 + len(_UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] ,(1 + len(_UpperCAmelCase ), 1 + len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) ,)
107
'''simple docstring'''

import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch

import numpy as np
import pytest
from absl.testing import parameterized

import datasets
from datasets import load_metric

from .utils import for_all_test_methods, local, slow


# mark all tests as integration
pytestmark = pytest.mark.integration


REQUIRE_FAIRSEQ = {'comet'}
_has_fairseq = importlib.util.find_spec('fairseq') is not None

UNSUPPORTED_ON_WINDOWS = {'code_eval'}
_on_windows = os.name == 'nt'

REQUIRE_TRANSFORMERS = {'bertscore', 'frugalscore', 'perplexity'}
_has_transformers = importlib.util.find_spec('transformers') is not None


def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows_if_not_windows_compatible(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper


def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob('./metrics/*/')]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished


@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows_if_not_windows_compatible
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None

    @pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning')
    @pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning')
    def test_load_metric(self, metric_name):
        _ = '[...]'
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('metrics', metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        _ = '[...]'
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('metrics', metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join('metrics', metric_name), *args, **kwargs)

        with patch('datasets.load_metric') as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper


@LocalMetricTest.register_intensive_calls_patcher('bleurt')
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string('sv', '', '')  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict['input_ids']) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch('bleurt.score._create_predictor') as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield


@LocalMetricTest.register_intensive_calls_patcher('bertscore')
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch('bert_score.scorer.get_model'), patch(
        'bert_score.scorer.bert_cos_score_idf'
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield


@LocalMetricTest.register_intensive_calls_patcher('comet')
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model and load_from_checkpoint which are supposed to download a comet model
    with patch('comet.download_model') as mock_download_model:
        mock_download_model.return_value = None
        with patch('comet.load_from_checkpoint') as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield


def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join('metrics', 'seqeval'))
    wrong_scheme = 'ERROR'
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
107
1
import argparse

import torch
from torch import nn

from transformers import MBartConfig, MBartForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
252
'''simple docstring'''


def nand_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
200
0
'''simple docstring'''


def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1

        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1000000))
283
'''simple docstring'''

from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import tensorflow as tf

    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM


@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
283
1
"""simple docstring""" from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS __snake_case = logging.get_logger(__name__) __snake_case = { """linear""": get_linear_schedule_with_warmup, """cosine""": get_cosine_schedule_with_warmup, """cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup, """polynomial""": get_polynomial_decay_schedule_with_warmup, """constant""": get_constant_schedule, """constant_w_warmup""": get_constant_schedule_with_warmup, } class _lowerCAmelCase ( snake_case_ ): def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , *UpperCamelCase__ , **UpperCamelCase__ ) -> str: '''simple docstring''' super().__init__(*UpperCamelCase__ , **UpperCamelCase__ ) if config is None: assert isinstance(self.model , UpperCamelCase__ ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" F' {self.model.__class__}' ) snake_case : Tuple = self.model.config else: snake_case : Tuple = config snake_case : List[Any] = data_args snake_case : Optional[int] = self.config.tgt_vocab_size if isinstance(self.config , UpperCamelCase__ ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( F'The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for' " padding.." 
) if self.args.label_smoothing == 0: snake_case : Dict = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss snake_case : Union[str, Any] = label_smoothed_nll_loss def lowerCamelCase ( self , UpperCamelCase__ ) -> Any: '''simple docstring''' if self.optimizer is None: snake_case : Any = ["bias", "LayerNorm.weight"] snake_case : str = [ { "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], "weight_decay": self.args.weight_decay, }, { "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], "weight_decay": 0.0, }, ] snake_case : Optional[Any] = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: snake_case : int = Adafactor snake_case : Optional[int] = {"scale_parameter": False, "relative_step": False} else: snake_case : int = AdamW snake_case : Optional[Any] = { "betas": (self.args.adam_betaa, self.args.adam_betaa), "eps": self.args.adam_epsilon, } snake_case : Any = self.args.learning_rate if self.sharded_ddp: snake_case : int = OSS( params=UpperCamelCase__ , optim=UpperCamelCase__ , **UpperCamelCase__ , ) else: snake_case : str = optimizer_cls(UpperCamelCase__ , **UpperCamelCase__ ) if self.lr_scheduler is None: snake_case : Union[str, Any] = self._get_lr_scheduler(UpperCamelCase__ ) else: # ignoring --lr_scheduler logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored." ) def lowerCamelCase ( self , UpperCamelCase__ ) -> List[str]: '''simple docstring''' snake_case : Any = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": snake_case : str = schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": snake_case : Dict = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: snake_case : List[Any] = schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=UpperCamelCase__ ) return scheduler def lowerCamelCase ( self ) -> Optional[torch.utils.data.Sampler]: '''simple docstring''' if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Any: '''simple docstring''' if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token snake_case : List[Any] = model(**UpperCamelCase__ , use_cache=UpperCamelCase__ )[0] snake_case : int = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models snake_case ,snake_case : List[Any] = model(**UpperCamelCase__ , labels=UpperCamelCase__ , use_cache=UpperCamelCase__ )[:2] else: # compute label smoothed loss snake_case : Optional[Any] = model(**UpperCamelCase__ , use_cache=UpperCamelCase__ )[0] snake_case : Tuple = torch.nn.functional.log_softmax(UpperCamelCase__ , dim=-1 ) snake_case ,snake_case : Optional[int] = self.loss_fn(UpperCamelCase__ , UpperCamelCase__ , self.args.label_smoothing , 
ignore_index=self.config.pad_token_id ) return loss, logits def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Any: '''simple docstring''' snake_case : Union[str, Any] = inputs.pop("labels" ) snake_case ,snake_case : Any = self._compute_loss(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return loss def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: '''simple docstring''' snake_case : List[Any] = self._prepare_inputs(UpperCamelCase__ ) snake_case : int = { "max_length": self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: snake_case : Union[str, Any] = self.model.generate( inputs["input_ids"] , attention_mask=inputs["attention_mask"] , **UpperCamelCase__ , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: snake_case : Tuple = self._pad_tensors_to_max_len(UpperCamelCase__ , gen_kwargs["max_length"] ) snake_case : Any = inputs.pop("labels" ) with torch.no_grad(): # compute loss on predict data snake_case ,snake_case : int = self._compute_loss(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) snake_case : Any = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) snake_case : Tuple = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: snake_case : str = self._pad_tensors_to_max_len(UpperCamelCase__ , gen_kwargs["max_length"] ) return (loss, logits, labels) def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> int: '''simple docstring''' snake_case : List[str] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be" F' padded to `max_length`={max_length}' ) snake_case : List[str] = pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) snake_case : Tuple = tensor return padded_tensor
203
"""simple docstring""" from typing import Dict from .base import GenericTensor, Pipeline class _lowerCAmelCase ( snake_case_ ): def lowerCamelCase ( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> Union[str, Any]: '''simple docstring''' if tokenize_kwargs is None: snake_case : Optional[Any] = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)" ) snake_case : List[str] = truncation snake_case : Union[str, Any] = tokenize_kwargs snake_case : List[Any] = {} if return_tensors is not None: snake_case : Tuple = return_tensors return preprocess_params, {}, postprocess_params def lowerCamelCase ( self , UpperCamelCase__ , **UpperCamelCase__ ) -> Dict[str, GenericTensor]: '''simple docstring''' snake_case : List[Any] = self.framework snake_case : str = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ ) return model_inputs def lowerCamelCase ( self , UpperCamelCase__ ) -> Tuple: '''simple docstring''' snake_case : int = self.model(**UpperCamelCase__ ) return model_outputs def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__=False ) -> Union[str, Any]: '''simple docstring''' if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]: '''simple docstring''' return super().__call__(*UpperCamelCase__ , **UpperCamelCase__ )
203
1
"""simple docstring""" def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Dict: '''simple docstring''' lowercase_ = 1 lowercase_ = 2 while i * i <= n: lowercase_ = 0 while n % i == 0: n //= i multiplicity += 1 n_divisors *= multiplicity + 1 i += 1 if n > 1: n_divisors *= 2 return n_divisors def _SCREAMING_SNAKE_CASE () -> int: '''simple docstring''' lowercase_ = 1 lowercase_ = 1 while True: i += 1 t_num += i if count_divisors(__lowerCAmelCase ) > 5_00: break return t_num if __name__ == "__main__": print(solution())
350
"""simple docstring""" import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCAmelCase : Optional[Any] = "platform" import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class SCREAMING_SNAKE_CASE__ : lowercase__ = PegasusConfig lowercase__ = {} lowercase__ = "gelu" def __init__( self : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any]=1_3 , lowerCAmelCase_ : Any=7 , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Tuple=False , lowerCAmelCase_ : str=9_9 , lowerCAmelCase_ : Tuple=3_2 , lowerCAmelCase_ : Dict=5 , lowerCAmelCase_ : Union[str, Any]=4 , lowerCAmelCase_ : Dict=3_7 , lowerCAmelCase_ : Any=0.1 , lowerCAmelCase_ : Any=0.1 , lowerCAmelCase_ : Optional[int]=2_0 , lowerCAmelCase_ : Tuple=2 , lowerCAmelCase_ : List[str]=1 , lowerCAmelCase_ : Optional[Any]=0 , ): """simple docstring""" lowercase_ = parent lowercase_ = batch_size lowercase_ = seq_length lowercase_ = is_training lowercase_ = use_labels lowercase_ = vocab_size lowercase_ = hidden_size lowercase_ = num_hidden_layers lowercase_ = num_attention_heads lowercase_ = intermediate_size lowercase_ = hidden_dropout_prob lowercase_ = attention_probs_dropout_prob lowercase_ = max_position_embeddings lowercase_ = eos_token_id lowercase_ = pad_token_id lowercase_ = bos_token_id def _UpperCAmelCase ( self : Optional[Any]): """simple docstring""" lowercase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size).clip(3 , self.vocab_size) lowercase_ = np.expand_dims(np.array([self.eos_token_id] * self.batch_size) , 1) lowercase_ = np.concatenate([input_ids, eos_tensor] , axis=1) lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) lowercase_ = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) lowercase_ = prepare_pegasus_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) return config, inputs_dict def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any]): """simple docstring""" lowercase_ = 2_0 lowercase_ = model_class_name(lowerCAmelCase_) lowercase_ = model.encode(inputs_dict["""input_ids"""]) lowercase_ , lowercase_ = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) lowercase_ = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase_ , lowerCAmelCase_) lowercase_ = 
jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""") lowercase_ = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowercase_ = model.decode( decoder_input_ids[:, :-1] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , ) lowercase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""") lowercase_ = model.decode( decoder_input_ids[:, -1:] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCAmelCase_ , ) lowercase_ = model.decode(lowerCAmelCase_ , lowerCAmelCase_) lowercase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''') def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict): """simple docstring""" lowercase_ = 2_0 lowercase_ = model_class_name(lowerCAmelCase_) lowercase_ = model.encode(inputs_dict["""input_ids"""]) lowercase_ , lowercase_ = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) lowercase_ = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])), ] , axis=-1 , ) lowercase_ = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase_ , lowerCAmelCase_) lowercase_ = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowercase_ = model.decode( decoder_input_ids[:, :-1] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , ) lowercase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""") lowercase_ = model.decode( decoder_input_ids[:, -1:] , lowerCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , ) lowercase_ = model.decode(lowerCAmelCase_ , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_) lowercase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''') def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , ) -> Optional[Any]: '''simple docstring''' if attention_mask is None: lowercase_ = np.not_equal(__lowerCAmelCase , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: lowercase_ = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , unittest.TestCase ): lowercase__ = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) lowercase__ = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () lowercase__ = True lowercase__ = False lowercase__ = False lowercase__ = False def _UpperCAmelCase ( 
self : Tuple): """simple docstring""" lowercase_ = FlaxPegasusModelTester(self) lowercase_ = ConfigTester(self , config_class=lowerCAmelCase_) def _UpperCAmelCase ( self : Any): """simple docstring""" self.config_tester.run_common_tests() def _UpperCAmelCase ( self : List[str]): """simple docstring""" lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) def _UpperCAmelCase ( self : Dict): """simple docstring""" lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) def _UpperCAmelCase ( self : Dict): """simple docstring""" lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): lowercase_ = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_) lowercase_ = model_class(lowerCAmelCase_) @jax.jit def encode_jitted(lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int]=None , **lowerCAmelCase_ : Optional[int]): return model.encode(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_) with self.subTest("""JIT Enabled"""): lowercase_ = encode_jitted(**lowerCAmelCase_).to_tuple() with self.subTest("""JIT Disabled"""): with jax.disable_jit(): lowercase_ = encode_jitted(**lowerCAmelCase_).to_tuple() self.assertEqual(len(lowerCAmelCase_) , len(lowerCAmelCase_)) for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_): self.assertEqual(jitted_output.shape , output.shape) def _UpperCAmelCase ( self : Tuple): """simple docstring""" lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): lowercase_ = model_class(lowerCAmelCase_) lowercase_ = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""]) lowercase_ = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict): return model.decode( decoder_input_ids=lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , encoder_outputs=lowerCAmelCase_ , ) with self.subTest("""JIT Enabled"""): lowercase_ = decode_jitted(**lowerCAmelCase_).to_tuple() with self.subTest("""JIT Disabled"""): with jax.disable_jit(): lowercase_ = decode_jitted(**lowerCAmelCase_).to_tuple() self.assertEqual(len(lowerCAmelCase_) , len(lowerCAmelCase_)) for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_): self.assertEqual(jitted_output.shape , output.shape) @slow def _UpperCAmelCase ( self : Tuple): """simple docstring""" for model_class_name in self.all_model_classes: lowercase_ = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=lowerCAmelCase_) lowercase_ = np.ones((1, 1)) lowercase_ = model(lowerCAmelCase_) self.assertIsNotNone(lowerCAmelCase_) @slow def _UpperCAmelCase ( self : Any): """simple docstring""" lowercase_ = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""") lowercase_ = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""") lowercase_ = [ """ PG&E stated it scheduled the blackouts 
in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """, ] lowercase_ = [ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] lowercase_ = tokenizer(lowerCAmelCase_ , return_tensors="""np""" , truncation=lowerCAmelCase_ , max_length=5_1_2 , padding=lowerCAmelCase_) lowercase_ = model.generate(**lowerCAmelCase_ , num_beams=2).sequences lowercase_ = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_) assert tgt_text == decoded
313
0
"""simple docstring""" def lowercase ( lowerCAmelCase__ : int ) -> list[int]: if length <= 0 or not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): raise ValueError('''Length must be a positive integer.''' ) return [n * (2 * n - 1) for n in range(lowerCAmelCase__ )] if __name__ == "__main__": print(hexagonal_numbers(length=5)) print(hexagonal_numbers(length=1_0))
45
'''simple docstring''' import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() a : List[str] = logging.get_logger(__name__) a : Optional[Any] = ["model.decoder.embed_positions.weights"] def lowercase ( __magic_name__ ): '''simple docstring''' if "emb" in name: UpperCAmelCase : str = name.replace("emb" , "model.decoder.embed_tokens" ) if "transformer" in name: UpperCAmelCase : List[str] = name.replace("transformer" , "model.decoder" ) if "cross_attention" in name: UpperCAmelCase : int = name.replace("cross_attention" , "encoder_attn" ) if "linear1" in name: UpperCAmelCase : List[Any] = name.replace("linear1" , "fc1" ) if "linear2" in name: UpperCAmelCase : int = name.replace("linear2" , "fc2" ) if "norm1" in name: UpperCAmelCase : Dict = name.replace("norm1" , "self_attn_layer_norm" ) if "norm_cross" in name: UpperCAmelCase : Any = name.replace("norm_cross" , "encoder_attn_layer_norm" ) if "norm2" in name: UpperCAmelCase : Union[str, Any] = name.replace("norm2" , "final_layer_norm" ) if "out_norm" in name: UpperCAmelCase : Dict = name.replace("out_norm" , "model.decoder.layer_norm" ) if "linears" in name: UpperCAmelCase : List[Any] = name.replace("linears" , "lm_heads" ) if "condition_provider.conditioners.description.output_proj" in name: UpperCAmelCase : Any = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" ) return name def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Any = list(state_dict.keys() ) UpperCAmelCase : List[Any] = {} for key in keys: UpperCAmelCase : Any = state_dict.pop(__magic_name__ ) UpperCAmelCase : str = rename_keys(__magic_name__ ) if "in_proj_weight" in key: # split fused qkv proj UpperCAmelCase : Optional[int] = val[:hidden_size, :] UpperCAmelCase : Optional[Any] = val[hidden_size : 2 * hidden_size, :] UpperCAmelCase : Optional[Any] = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: UpperCAmelCase : str = val else: UpperCAmelCase : int = val return state_dict, enc_dec_proj_state_dict def lowercase ( __magic_name__ ): '''simple docstring''' if checkpoint == "small": # default config values UpperCAmelCase : List[Any] = 1024 UpperCAmelCase : Tuple = 24 UpperCAmelCase : Union[str, Any] = 16 elif checkpoint == "medium": UpperCAmelCase : List[Any] = 1536 UpperCAmelCase : Optional[Any] = 48 UpperCAmelCase : List[str] = 24 elif checkpoint == "large": UpperCAmelCase : List[Any] = 2048 UpperCAmelCase : str = 48 UpperCAmelCase : Optional[Any] = 32 else: raise ValueError(F"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." 
) UpperCAmelCase : Tuple = MusicgenDecoderConfig( hidden_size=__magic_name__ , ffn_dim=hidden_size * 4 , num_hidden_layers=__magic_name__ , num_attention_heads=__magic_name__ , ) return config @torch.no_grad() def lowercase ( __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__="cpu" ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = MusicGen.get_pretrained(__magic_name__ , device=__magic_name__ ) UpperCAmelCase : List[str] = decoder_config_from_checkpoint(__magic_name__ ) UpperCAmelCase : Dict = fairseq_model.lm.state_dict() UpperCAmelCase , UpperCAmelCase : List[str] = rename_state_dict( __magic_name__ , hidden_size=decoder_config.hidden_size ) UpperCAmelCase : Any = TaEncoderModel.from_pretrained("t5-base" ) UpperCAmelCase : Any = EncodecModel.from_pretrained("facebook/encodec_32khz" ) UpperCAmelCase : int = MusicgenForCausalLM(__magic_name__ ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection UpperCAmelCase , UpperCAmelCase : Optional[int] = decoder.load_state_dict(__magic_name__ , strict=__magic_name__ ) for key in missing_keys.copy(): if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(__magic_name__ ) if len(__magic_name__ ) > 0: raise ValueError(F"Missing key(s) in state_dict: {missing_keys}" ) if len(__magic_name__ ) > 0: raise ValueError(F"Unexpected key(s) in state_dict: {unexpected_keys}" ) # init the composite model UpperCAmelCase : List[Any] = MusicgenForConditionalGeneration(text_encoder=__magic_name__ , audio_encoder=__magic_name__ , decoder=__magic_name__ ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(__magic_name__ ) # check we can do a forward pass UpperCAmelCase : Union[str, Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) UpperCAmelCase : Optional[Any] = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): UpperCAmelCase : str = model(input_ids=__magic_name__ , decoder_input_ids=__magic_name__ ).logits if logits.shape != (8, 1, 2048): raise ValueError("Incorrect shape for logits" ) # now construct the processor UpperCAmelCase : Dict = AutoTokenizer.from_pretrained("t5-base" ) UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" ) UpperCAmelCase : Dict = MusicgenProcessor(feature_extractor=__magic_name__ , tokenizer=__magic_name__ ) # set the appropriate bos/pad token ids UpperCAmelCase : List[Any] = 2048 UpperCAmelCase : Tuple = 2048 # set other default generation config params UpperCAmelCase : Tuple = int(30 * audio_encoder.config.frame_rate ) UpperCAmelCase : str = True UpperCAmelCase : Tuple = 3.0 if pytorch_dump_folder is not None: Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) logger.info(F"Saving model {checkpoint} to {pytorch_dump_folder}" ) model.save_pretrained(__magic_name__ ) processor.save_pretrained(__magic_name__ ) if repo_id: logger.info(F"Pushing model {checkpoint} to {repo_id}" ) model.push_to_hub(__magic_name__ ) processor.push_to_hub(__magic_name__ ) if __name__ == "__main__": a : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint", default="small", type=str, help="Checkpoint size of the MusicGen model you'd like to convert. 
Can be one of: `['small', 'medium', 'large']`.", ) parser.add_argument( "--pytorch_dump_folder", required=True, default=None, type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) parser.add_argument( "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda." ) a : int = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
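# Hypothetical invocation of the MusicGen conversion script above (the file
# name and output path are illustrative, not part of the original):
#
#   python convert_musicgen_checkpoint.py --checkpoint small \
#       --pytorch_dump_folder ./musicgen-small --device cpu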
311
0
import inspect from typing import Optional, Union import numpy as np import PIL import torch from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import ( PIL_INTERPOLATION, randn_tensor, ) def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> Union[str, Any]: if isinstance(lowercase ,torch.Tensor ): return image elif isinstance(lowercase ,PIL.Image.Image ): snake_case : str = [image] if isinstance(image[0] ,PIL.Image.Image ): snake_case : List[Any] = [np.array(i.resize((w, h) ,resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image] snake_case : Optional[int] = np.concatenate(lowercase ,axis=0 ) snake_case : str = np.array(lowercase ).astype(np.floataa ) / 255.0 snake_case : List[str] = image.transpose(0 ,3 ,1 ,2 ) snake_case : Any = 2.0 * image - 1.0 snake_case : Optional[Any] = torch.from_numpy(lowercase ) elif isinstance(image[0] ,torch.Tensor ): snake_case : Optional[int] = torch.cat(lowercase ,dim=0 ) return image def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase=0.9995 ) -> Optional[int]: if not isinstance(lowercase ,np.ndarray ): snake_case : Any = True snake_case : str = va.device snake_case : Optional[Any] = va.cpu().numpy() snake_case : str = va.cpu().numpy() snake_case : Tuple = np.sum(va * va / (np.linalg.norm(lowercase ) * np.linalg.norm(lowercase )) ) if np.abs(lowercase ) > DOT_THRESHOLD: snake_case : Optional[int] = (1 - t) * va + t * va else: snake_case : List[Any] = np.arccos(lowercase ) snake_case : str = np.sin(lowercase ) snake_case : int = theta_a * t snake_case : Dict = np.sin(lowercase ) snake_case : Optional[Any] = np.sin(theta_a - theta_t ) / sin_theta_a snake_case : Union[str, Any] = sin_theta_t / sin_theta_a snake_case : Union[str, Any] = sa * va + sa * va if inputs_are_torch: snake_case : List[Any] = torch.from_numpy(lowercase ).to(lowercase ) return va def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> Tuple: snake_case : Dict = F.normalize(lowercase ,dim=-1 ) snake_case : Optional[Any] = F.normalize(lowercase ,dim=-1 ) return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 ) def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> str: for param in model.parameters(): snake_case : Tuple = value class __lowercase (UpperCamelCase__ ): """simple docstring""" def __init__( self , A , A , A , A , A , A , A , A=None , A=None , A=None , ) -> List[Any]: super().__init__() self.register_modules( vae=A , text_encoder=A , clip_model=A , tokenizer=A , unet=A , scheduler=A , feature_extractor=A , coca_model=A , coca_tokenizer=A , coca_transform=A , ) snake_case : Optional[int] = ( feature_extractor.size if isinstance(feature_extractor.size , A ) else feature_extractor.size["""shortest_edge"""] ) snake_case : Union[str, Any] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std ) set_requires_grad(self.text_encoder , A ) set_requires_grad(self.clip_model , A ) def UpperCAmelCase ( self , A = "auto" ) -> Tuple: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory snake_case : List[str] = self.unet.config.attention_head_dim // 2 
self.unet.set_attention_slice(A ) def UpperCAmelCase ( self ) -> Optional[int]: self.enable_attention_slicing(A ) def UpperCAmelCase ( self ) -> Any: set_requires_grad(self.vae , A ) def UpperCAmelCase ( self ) -> List[Any]: set_requires_grad(self.vae , A ) def UpperCAmelCase ( self ) -> Union[str, Any]: set_requires_grad(self.unet , A ) def UpperCAmelCase ( self ) -> Tuple: set_requires_grad(self.unet , A ) def UpperCAmelCase ( self , A , A , A ) -> Dict: # get the original timestep using init_timestep snake_case : Tuple = min(int(num_inference_steps * strength ) , A ) snake_case : List[str] = max(num_inference_steps - init_timestep , 0 ) snake_case : List[str] = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def UpperCAmelCase ( self , A , A , A , A , A , A=None ) -> List[str]: if not isinstance(A , torch.Tensor ): raise ValueError(f"""`image` has to be of type `torch.Tensor` but is {type(A )}""" ) snake_case : str = image.to(device=A , dtype=A ) if isinstance(A , A ): snake_case : int = [ self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(A ) ] snake_case : str = torch.cat(A , dim=0 ) else: snake_case : List[Any] = self.vae.encode(A ).latent_dist.sample(A ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor snake_case : Dict = 0.1_82_15 * init_latents snake_case : Tuple = init_latents.repeat_interleave(A , dim=0 ) snake_case : Optional[int] = randn_tensor(init_latents.shape , generator=A , device=A , dtype=A ) # get latents snake_case : Union[str, Any] = self.scheduler.add_noise(A , A , A ) snake_case : List[Any] = init_latents return latents def UpperCAmelCase ( self , A ) -> int: snake_case : Optional[Any] = self.coca_transform(A ).unsqueeze(0 ) with torch.no_grad(), torch.cuda.amp.autocast(): snake_case : Optional[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) ) snake_case : int = self.coca_tokenizer.decode(generated[0].cpu().numpy() ) return generated.split("""<end_of_text>""" )[0].replace("""<start_of_text>""" , """""" ).rstrip(""" .,""" ) def UpperCAmelCase ( self , A , A ) -> List[Any]: snake_case : Tuple = self.feature_extractor.preprocess(A ) snake_case : List[Any] = torch.from_numpy(clip_image_input["""pixel_values"""][0] ).unsqueeze(0 ).to(self.device ).half() snake_case : Optional[int] = self.clip_model.get_image_features(A ) snake_case : List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=A ) snake_case : Tuple = image_embeddings_clip.repeat_interleave(A , dim=0 ) return image_embeddings_clip @torch.enable_grad() def UpperCAmelCase ( self , A , A , A , A , A , A , A , ) -> Any: snake_case : Dict = latents.detach().requires_grad_() snake_case : str = self.scheduler.scale_model_input(A , A ) # predict the noise residual snake_case : str = self.unet(A , A , encoder_hidden_states=A ).sample if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ): snake_case : int = self.scheduler.alphas_cumprod[timestep] snake_case : Tuple = 1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf snake_case : Any = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 snake_case : str = torch.sqrt(A ) snake_case : str = pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler , A ): snake_case : int = 
self.scheduler.sigmas[index] snake_case : List[Any] = latents - sigma * noise_pred else: raise ValueError(f"""scheduler type {type(self.scheduler )} not supported""" ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor snake_case : List[str] = 1 / 0.1_82_15 * sample snake_case : str = self.vae.decode(A ).sample snake_case : Any = (image / 2 + 0.5).clamp(0 , 1 ) snake_case : str = transforms.Resize(self.feature_extractor_size )(A ) snake_case : Dict = self.normalize(A ).to(latents.dtype ) snake_case : Union[str, Any] = self.clip_model.get_image_features(A ) snake_case : List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=A ) snake_case : Optional[int] = spherical_dist_loss(A , A ).mean() * clip_guidance_scale snake_case : int = -torch.autograd.grad(A , A )[0] if isinstance(self.scheduler , A ): snake_case : Union[str, Any] = latents.detach() + grads * (sigma**2) snake_case : Union[str, Any] = noise_pred_original else: snake_case : List[str] = noise_pred_original - torch.sqrt(A ) * grads return noise_pred, latents @torch.no_grad() def __call__( self , A , A , A = None , A = None , A = 5_1_2 , A = 5_1_2 , A = 0.6 , A = 5_0 , A = 7.5 , A = 1 , A = 0.0 , A = 1_0_0 , A = None , A = "pil" , A = True , A = 0.8 , A = 0.1 , A = 0.1 , ) -> Union[str, Any]: if isinstance(A , A ) and len(A ) != batch_size: raise ValueError(f"""You have passed {batch_size} batch_size, but only {len(A )} generators.""" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" ) if isinstance(A , torch.Generator ) and batch_size > 1: snake_case : Dict = [generator] + [None] * (batch_size - 1) snake_case : Tuple = [ ("""model""", self.coca_model is None), ("""tokenizer""", self.coca_tokenizer is None), ("""transform""", self.coca_transform is None), ] snake_case : List[str] = [x[0] for x in coca_is_none if x[1]] snake_case : Optional[int] = """, """.join(A ) # generate prompts with coca model if prompt is None if content_prompt is None: if len(A ): raise ValueError( f"""Content prompt is None and CoCa [{coca_is_none_str}] is None.""" f"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) snake_case : Tuple = self.get_image_description(A ) if style_prompt is None: if len(A ): raise ValueError( f"""Style prompt is None and CoCa [{coca_is_none_str}] is None.""" f""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) snake_case : List[Any] = self.get_image_description(A ) # get prompt text embeddings for content and style snake_case : Dict = self.tokenizer( A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=A , return_tensors="""pt""" , ) snake_case : str = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0] snake_case : Dict = self.tokenizer( A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=A , return_tensors="""pt""" , ) snake_case : Tuple = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0] snake_case : List[str] = slerp(A , A , A ) # duplicate text embeddings for each generation per prompt snake_case : List[Any] = text_embeddings.repeat_interleave(A , dim=0 ) # set timesteps snake_case : Union[str, Any] = """offset""" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() ) snake_case : Optional[Any] = {} if accepts_offset: snake_case : Dict = 1 self.scheduler.set_timesteps(A , **A ) # Some schedulers 
like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device ) snake_case , snake_case : List[Any] = self.get_timesteps(A , A , self.device ) snake_case : List[str] = timesteps[:1].repeat(A ) # Preprocess image snake_case : Dict = preprocess(A , A , A ) snake_case : List[Any] = self.prepare_latents( A , A , A , text_embeddings.dtype , self.device , A ) snake_case : Optional[int] = preprocess(A , A , A ) snake_case : Optional[Any] = self.prepare_latents( A , A , A , text_embeddings.dtype , self.device , A ) snake_case : str = slerp(A , A , A ) if clip_guidance_scale > 0: snake_case : List[Any] = self.get_clip_image_embeddings(A , A ) snake_case : Any = self.get_clip_image_embeddings(A , A ) snake_case : Tuple = slerp( A , A , A ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. snake_case : Optional[Any] = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: snake_case : List[str] = content_text_input.input_ids.shape[-1] snake_case : Any = self.tokenizer([""""""] , padding="""max_length""" , max_length=A , return_tensors="""pt""" ) snake_case : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt snake_case : Tuple = uncond_embeddings.repeat_interleave(A , dim=0 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes snake_case : str = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. snake_case : Tuple = (batch_size, self.unet.config.in_channels, height // 8, width // 8) snake_case : List[Any] = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps snake_case : List[Any] = torch.randn(A , generator=A , device="""cpu""" , dtype=A ).to( self.device ) else: snake_case : Optional[int] = torch.randn(A , generator=A , device=self.device , dtype=A ) else: if latents.shape != latents_shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) snake_case : Union[str, Any] = latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler snake_case : Dict = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] snake_case : Tuple = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) snake_case : Dict = {} if accepts_eta: snake_case : Union[str, Any] = eta # check if the scheduler accepts generator snake_case : List[Any] = """generator""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) if accepts_generator: snake_case : List[str] = generator with self.progress_bar(total=A ): for i, t in enumerate(A ): # expand the latents if we are doing classifier free guidance snake_case : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents snake_case : List[str] = self.scheduler.scale_model_input(A , A ) # predict the noise residual snake_case : Any = self.unet(A , A , encoder_hidden_states=A ).sample # perform classifier free guidance if do_classifier_free_guidance: snake_case , snake_case : int = noise_pred.chunk(2 ) snake_case : Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: snake_case : Any = ( text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings ) snake_case , snake_case : List[Any] = self.cond_fn( A , A , A , A , A , A , A , ) # compute the previous noisy sample x_t -> x_t-1 snake_case : Tuple = self.scheduler.step(A , A , A , **A ).prev_sample # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor snake_case : str = 1 / 0.1_82_15 * latents snake_case : Optional[Any] = self.vae.decode(A ).sample snake_case : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 ) snake_case : str = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": snake_case : Tuple = self.numpy_to_pil(A ) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
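# Minimal numpy sketch of the spherical interpolation (slerp) used by the
# pipeline above: at t=0 it returns va, at t=1 it returns vb, and in between
# it follows the arc, so unit vectors stay unit length (unlike a straight lerp).
import numpy as np

va = np.array([1.0, 0.0])
vb = np.array([0.0, 1.0])
theta = np.arccos(np.clip(np.dot(va, vb), -1.0, 1.0))  # angle between the vectors
for t in (0.0, 0.5, 1.0):
    out = (np.sin((1 - t) * theta) * va + np.sin(t * theta) * vb) / np.sin(theta)
    print(t, out, np.linalg.norm(out))  # t=0.5 -> [0.7071, 0.7071], norm 1.0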
176
import functools


def mincost_tickets(days: list[int], costs: list[int]) -> int:
    # Validation: the original gave both parameters the same placeholder name
    # (a SyntaxError) and then referenced the undefined names `days`/`costs`.
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        # Best of buying a 1-day, 7-day, or 30-day pass on this travel day
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
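# Worked example for mincost_tickets() above: with travel days
# [1, 4, 6, 7, 8, 20] and pass costs [2, 7, 15] (1-day, 7-day, 30-day),
# a 7-day pass covering days 1-7 plus 1-day tickets for days 8 and 20
# is optimal: 7 + 2 + 2 = 11.
assert mincost_tickets(days=[1, 4, 6, 7, 8, 20], costs=[2, 7, 15]) == 11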
176
1
from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging UpperCAmelCase_ = logging.get_logger(__name__) class lowerCamelCase__( __lowercase): UpperCAmelCase__ : str = ['input_features', 'attention_mask'] def __init__( self: List[Any] , UpperCamelCase_: int=80 , UpperCamelCase_: int=1_60_00 , UpperCamelCase_: Any=80 , UpperCamelCase_: str=0.0 , UpperCamelCase_: Any=True , UpperCamelCase_: List[Any]=True , UpperCamelCase_: Optional[int]=True , **UpperCamelCase_: int , ): super().__init__(feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , **UpperCamelCase_ ) __lowerCamelCase = num_mel_bins __lowerCamelCase = do_ceptral_normalize __lowerCamelCase = normalize_means __lowerCamelCase = normalize_vars __lowerCamelCase = True def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: np.ndarray , ): __lowerCamelCase = waveform * (2**15) # Kaldi compliance: 16-bit signed integers __lowerCamelCase = torch.from_numpy(UpperCamelCase_ ).unsqueeze(0 ) __lowerCamelCase = ta_kaldi.fbank(UpperCamelCase_ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def lowerCAmelCase__ ( UpperCamelCase_: np.ndarray , UpperCamelCase_: int , UpperCamelCase_: Optional[bool] = True , UpperCamelCase_: Optional[bool] = True , UpperCamelCase_: float = 0.0 , ): # make sure we normalize float32 arrays if normalize_means: __lowerCamelCase = x[:input_length].mean(axis=0 ) __lowerCamelCase = np.subtract(UpperCamelCase_ , UpperCamelCase_ ) if normalize_vars: __lowerCamelCase = x[:input_length].std(axis=0 ) __lowerCamelCase = np.divide(UpperCamelCase_ , UpperCamelCase_ ) if input_length < x.shape[0]: __lowerCamelCase = padding_value # make sure array is in float32 __lowerCamelCase = x.astype(np.floataa ) return x def lowerCAmelCase__ ( self: Any , UpperCamelCase_: List[np.ndarray] , UpperCamelCase_: Optional[np.ndarray] = None ): __lowerCamelCase = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(UpperCamelCase_ , UpperCamelCase_ , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(UpperCamelCase_ , UpperCamelCase_ ) ] def __call__( self: List[str] , UpperCamelCase_: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_: Union[bool, str, PaddingStrategy] = False , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: bool = False , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[Union[str, TensorType]] = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[bool] = None , **UpperCamelCase_: List[str] , ): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' F' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with' F' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. 
""" """Failing to do so can result in silent errors that might be hard to debug.""" ) __lowerCamelCase = isinstance(UpperCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'Only mono-channel audio is supported for input to {self}' ) __lowerCamelCase = is_batched_numpy or ( isinstance(UpperCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __lowerCamelCase = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray ): __lowerCamelCase = np.asarray(UpperCamelCase_ , dtype=np.floataa ) elif isinstance(UpperCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __lowerCamelCase = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __lowerCamelCase = [raw_speech] # extract fbank features __lowerCamelCase = [self._extract_fbank_features(UpperCamelCase_ ) for waveform in raw_speech] # convert into correct format for padding __lowerCamelCase = BatchFeature({"""input_features""": features} ) __lowerCamelCase = self.pad( UpperCamelCase_ , padding=UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , ) # make sure list is in array format __lowerCamelCase = padded_inputs.get("""input_features""" ) if isinstance(input_features[0] , UpperCamelCase_ ): __lowerCamelCase = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for feature in input_features] __lowerCamelCase = padded_inputs.get("""attention_mask""" ) if attention_mask is not None: __lowerCamelCase = [np.asarray(UpperCamelCase_ , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: __lowerCamelCase = ( np.array(UpperCamelCase_ , dtype=np.intaa ) if self._get_padding_strategies(UpperCamelCase_ , max_length=UpperCamelCase_ ) is not PaddingStrategy.DO_NOT_PAD else None ) __lowerCamelCase = self.normalize( padded_inputs["""input_features"""] , attention_mask=UpperCamelCase_ ) if return_tensors is not None: __lowerCamelCase = padded_inputs.convert_to_tensors(UpperCamelCase_ ) return padded_inputs
12
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image

if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    """Convert a batch of torch images in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image or a batch of images to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
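# Quick round-trip sketch for pt_to_pil() above, assuming a batch of decoder
# outputs in [-1, 1] as diffusion pipelines produce them:
import torch

images = torch.rand(2, 3, 64, 64) * 2 - 1  # fake batch in [-1, 1]
pil_images = pt_to_pil(images)
print(len(pil_images), pil_images[0].size)  # 2 (64, 64)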
232
0
"""simple docstring""" import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def lowerCamelCase_( _lowerCamelCase ) -> List[str]: '''simple docstring''' _lowerCamelCase : Optional[int] = FileLock(str(tmpdir / "foo.lock" ) ) _lowerCamelCase : List[str] = FileLock(str(tmpdir / "foo.lock" ) ) _lowerCamelCase : List[str] = 0.0_1 with locka.acquire(): with pytest.raises(_lowerCamelCase ): _lowerCamelCase : Dict = time.time() locka.acquire(_lowerCamelCase ) assert time.time() - _start > timeout def lowerCamelCase_( _lowerCamelCase ) -> str: '''simple docstring''' _lowerCamelCase : str = "a" * 1000 + ".lock" _lowerCamelCase : Optional[Any] = FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith(".lock" ) assert not locka._lock_file.endswith(_lowerCamelCase ) assert len(os.path.basename(locka._lock_file ) ) <= 255 _lowerCamelCase : Any = FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(_lowerCamelCase ): locka.acquire(0 )
340
"""simple docstring""" from collections import defaultdict def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : Optional[int] = 1 _lowerCamelCase : str = True for v in tree[start]: if v not in visited: ret += dfs(_lowerCamelCase ) if ret % 2 == 0: cuts.append(_lowerCamelCase ) return ret def lowerCamelCase_( ) -> int: '''simple docstring''' dfs(1 ) if __name__ == "__main__": _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = 10, 9 _lowerCAmelCase : str = defaultdict(list) _lowerCAmelCase : dict[int, bool] = {} _lowerCAmelCase : list[int] = [] _lowerCAmelCase : Any = 0 _lowerCAmelCase : Any = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)] for u, v in edges: tree[u].append(v) tree[v].append(u) even_tree() print(len(cuts) - 1)
340
1
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from accelerate import PartialState from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce def __lowerCamelCase ( a_ : List[Any] ) -> Any: return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device ) def __lowerCamelCase ( a_ : Optional[int] ) -> Tuple: __SCREAMING_SNAKE_CASE :Tuple = create_tensor(a_ ) __SCREAMING_SNAKE_CASE :Dict = gather(a_ ) assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) ) def __lowerCamelCase ( a_ : Dict ) -> Any: __SCREAMING_SNAKE_CASE :Any = [state.process_index] __SCREAMING_SNAKE_CASE :Tuple = gather_object(a_ ) assert len(a_ ) == state.num_processes, f'''{gathered_obj}, {len(a_ )} != {state.num_processes}''' assert gathered_obj == list(range(state.num_processes ) ), f'''{gathered_obj} != {list(range(state.num_processes ) )}''' def __lowerCamelCase ( a_ : Union[str, Any] ) -> str: __SCREAMING_SNAKE_CASE :Any = create_tensor(a_ ) __SCREAMING_SNAKE_CASE :int = broadcast(a_ ) assert broadcasted_tensor.shape == torch.Size([state.num_processes] ) assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) ) def __lowerCamelCase ( a_ : List[str] ) -> Any: # We need to pad the tensor with one more element if we are the main process # to ensure that we can pad if state.is_main_process: __SCREAMING_SNAKE_CASE :Optional[int] = torch.arange(state.num_processes + 1 ).to(state.device ) else: __SCREAMING_SNAKE_CASE :Optional[int] = torch.arange(state.num_processes ).to(state.device ) __SCREAMING_SNAKE_CASE :List[Any] = pad_across_processes(a_ ) assert padded_tensor.shape == torch.Size([state.num_processes + 1] ) if not state.is_main_process: assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0] def __lowerCamelCase ( a_ : Tuple ) -> Optional[Any]: # For now runs on only two processes if state.num_processes != 2: return __SCREAMING_SNAKE_CASE :Any = create_tensor(a_ ) __SCREAMING_SNAKE_CASE :Any = reduce(a_ , '''sum''' ) __SCREAMING_SNAKE_CASE :List[str] = torch.tensor([4.0, 6] ).to(state.device ) assert torch.allclose(a_ , a_ ), f'''{reduced_tensor} != {truth_tensor}''' def __lowerCamelCase ( a_ : int ) -> List[str]: # For now runs on only two processes if state.num_processes != 2: return __SCREAMING_SNAKE_CASE :Optional[Any] = create_tensor(a_ ) __SCREAMING_SNAKE_CASE :str = reduce(a_ , '''mean''' ) __SCREAMING_SNAKE_CASE :Any = torch.tensor([2.0, 3] ).to(state.device ) assert torch.allclose(a_ , a_ ), f'''{reduced_tensor} != {truth_tensor}''' def __lowerCamelCase ( a_ : str ) -> Optional[Any]: # For xla_spawn (TPUs) main() def __lowerCamelCase ( ) -> int: __SCREAMING_SNAKE_CASE :List[Any] = PartialState() state.print(f'''State: {state}''' ) state.print('''testing gather''' ) test_gather(a_ ) state.print('''testing gather_object''' ) test_gather_object(a_ ) state.print('''testing 
broadcast''' ) test_broadcast(a_ ) state.print('''testing pad_across_processes''' ) test_pad_across_processes(a_ ) state.print('''testing reduce_sum''' ) test_reduce_sum(a_ ) state.print('''testing reduce_mean''' ) test_reduce_mean(a_ ) if __name__ == "__main__": main()
191
"""simple docstring""" import argparse import logging import pickle import random import time import numpy as np from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO ) lowerCamelCase_ = logging.getLogger(__name__) def __lowerCamelCase ( ) -> int: __SCREAMING_SNAKE_CASE :Union[str, Any] = argparse.ArgumentParser( description='''Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).''' ) parser.add_argument('''--file_path''' , type=a_ , default='''data/dump.txt''' , help='''The path to the data.''' ) parser.add_argument('''--tokenizer_type''' , type=a_ , default='''bert''' , choices=['''bert''', '''roberta''', '''gpt2'''] ) parser.add_argument('''--tokenizer_name''' , type=a_ , default='''bert-base-uncased''' , help='''The tokenizer to use.''' ) parser.add_argument('''--dump_file''' , type=a_ , default='''data/dump''' , help='''The dump file prefix.''' ) __SCREAMING_SNAKE_CASE :Any = parser.parse_args() logger.info(f'''Loading Tokenizer ({args.tokenizer_name})''' ) if args.tokenizer_type == "bert": __SCREAMING_SNAKE_CASE :Union[str, Any] = BertTokenizer.from_pretrained(args.tokenizer_name ) __SCREAMING_SNAKE_CASE :Optional[int] = tokenizer.special_tokens_map['''cls_token'''] # `[CLS]` __SCREAMING_SNAKE_CASE :Union[str, Any] = tokenizer.special_tokens_map['''sep_token'''] # `[SEP]` elif args.tokenizer_type == "roberta": __SCREAMING_SNAKE_CASE :str = RobertaTokenizer.from_pretrained(args.tokenizer_name ) __SCREAMING_SNAKE_CASE :str = tokenizer.special_tokens_map['''cls_token'''] # `<s>` __SCREAMING_SNAKE_CASE :str = tokenizer.special_tokens_map['''sep_token'''] # `</s>` elif args.tokenizer_type == "gpt2": __SCREAMING_SNAKE_CASE :Union[str, Any] = GPTaTokenizer.from_pretrained(args.tokenizer_name ) __SCREAMING_SNAKE_CASE :Optional[int] = tokenizer.special_tokens_map['''bos_token'''] # `<|endoftext|>` __SCREAMING_SNAKE_CASE :str = tokenizer.special_tokens_map['''eos_token'''] # `<|endoftext|>` logger.info(f'''Loading text from {args.file_path}''' ) with open(args.file_path , '''r''' , encoding='''utf8''' ) as fp: __SCREAMING_SNAKE_CASE :Union[str, Any] = fp.readlines() logger.info('''Start encoding''' ) logger.info(f'''{len(a_ )} examples to process.''' ) __SCREAMING_SNAKE_CASE :Optional[int] = [] __SCREAMING_SNAKE_CASE :List[str] = 0 __SCREAMING_SNAKE_CASE :Optional[Any] = 1_00_00 __SCREAMING_SNAKE_CASE :List[Any] = time.time() for text in data: __SCREAMING_SNAKE_CASE :Any = f'''{bos} {text.strip()} {sep}''' __SCREAMING_SNAKE_CASE :int = tokenizer.encode(a_ , add_special_tokens=a_ ) rslt.append(a_ ) iter += 1 if iter % interval == 0: __SCREAMING_SNAKE_CASE :Any = time.time() logger.info(f'''{iter} examples processed. - {(end-start):.2f}s/{interval}expl''' ) __SCREAMING_SNAKE_CASE :Any = time.time() logger.info('''Finished binarization''' ) logger.info(f'''{len(a_ )} examples processed.''' ) __SCREAMING_SNAKE_CASE :Optional[int] = f'''{args.dump_file}.{args.tokenizer_name}.pickle''' __SCREAMING_SNAKE_CASE :str = tokenizer.vocab_size if vocab_size < (1 << 16): __SCREAMING_SNAKE_CASE :Union[str, Any] = [np.uintaa(a_ ) for d in rslt] else: __SCREAMING_SNAKE_CASE :List[Any] = [np.intaa(a_ ) for d in rslt] random.shuffle(rslt_ ) logger.info(f'''Dump to {dp_file}''' ) with open(a_ , '''wb''' ) as handle: pickle.dump(rslt_ , a_ , protocol=pickle.HIGHEST_PROTOCOL ) if __name__ == "__main__": main()
191
1
'''simple docstring'''

import unittest

from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script


@require_cpu
class CPUTester(unittest.TestCase):
    # The original defined both methods under the same name, so the first
    # test was silently shadowed; distinct names let unittest run both.
    def test_script(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
354
'''simple docstring'''

import os


def solution():
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right: products of four horizontally adjacent numbers
    for i in range(20):
        for j in range(17):  # 17 = 20 - 3, so j + 3 stays in bounds
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1 (down-right)
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2 (down-left); j starts at 3 so j - 3 stays in bounds
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum


if __name__ == "__main__":
    print(solution())
229
0
def perfect(number: int) -> bool:
    # A perfect number equals the sum of its proper divisors (e.g. 6 = 1 + 2 + 3)
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
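# Spot checks for perfect() above: 6 = 1 + 2 + 3 and 28 = 1 + 2 + 4 + 7 + 14
# are perfect numbers, 27 is not.
assert perfect(6) and perfect(28) and not perfect(27)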
245
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("<n>", "", x)  # remove pegasus newline char (the original discarded this result)
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
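# Usage sketch for add_newline_to_end_of_each_sentence() above; with nltk's
# punkt data available it returns one sentence per line:
#
#   >>> add_newline_to_end_of_each_sentence("First sentence. Second one.")
#   'First sentence.\nSecond one.'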
245
1
"""simple docstring""" import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.esm.modeling_esmfold import EsmForProteinFolding class lowerCAmelCase__ : def __init__( self : str , snake_case__ : List[Any] , snake_case__ : Union[str, Any]=1_3 , snake_case__ : Any=7 , snake_case__ : str=False , snake_case__ : Union[str, Any]=True , snake_case__ : int=False , snake_case__ : List[str]=False , snake_case__ : List[Any]=1_9 , snake_case__ : List[str]=3_2 , snake_case__ : int=5 , snake_case__ : Dict=4 , snake_case__ : str=3_7 , snake_case__ : Any="gelu" , snake_case__ : List[str]=0.1 , snake_case__ : str=0.1 , snake_case__ : Tuple=5_1_2 , snake_case__ : str=1_6 , snake_case__ : List[Any]=2 , snake_case__ : int=0.02 , snake_case__ : Optional[Any]=3 , snake_case__ : Tuple=4 , snake_case__ : List[Any]=None , ): '''simple docstring''' UpperCAmelCase__ : List[str] = parent UpperCAmelCase__ : List[Any] = batch_size UpperCAmelCase__ : Optional[Any] = seq_length UpperCAmelCase__ : List[Any] = is_training UpperCAmelCase__ : Tuple = use_input_mask UpperCAmelCase__ : Dict = use_token_type_ids UpperCAmelCase__ : Tuple = use_labels UpperCAmelCase__ : Optional[Any] = vocab_size UpperCAmelCase__ : List[Any] = hidden_size UpperCAmelCase__ : Optional[int] = num_hidden_layers UpperCAmelCase__ : str = num_attention_heads UpperCAmelCase__ : List[str] = intermediate_size UpperCAmelCase__ : int = hidden_act UpperCAmelCase__ : int = hidden_dropout_prob UpperCAmelCase__ : Optional[Any] = attention_probs_dropout_prob UpperCAmelCase__ : Optional[Any] = max_position_embeddings UpperCAmelCase__ : Optional[Any] = type_vocab_size UpperCAmelCase__ : Optional[int] = type_sequence_label_size UpperCAmelCase__ : Union[str, Any] = initializer_range UpperCAmelCase__ : Optional[int] = num_labels UpperCAmelCase__ : Tuple = num_choices UpperCAmelCase__ : Optional[Any] = scope def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase__ : List[Any] = None if self.use_input_mask: UpperCAmelCase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase__ : Optional[Any] = None UpperCAmelCase__ : str = None UpperCAmelCase__ : Union[str, Any] = None if self.use_labels: UpperCAmelCase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase__ : List[Any] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : str = EsmConfig( vocab_size=3_3 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings 
, type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=snake_case__ , esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False} , ) return config def __a ( self : Any , snake_case__ : List[str] , snake_case__ : str , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : Tuple = EsmForProteinFolding(config=snake_case__ ).float() model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ ) UpperCAmelCase__ : Tuple = model(snake_case__ ) UpperCAmelCase__ : List[str] = model(snake_case__ ) self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 1_4, 3) ) self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) ) def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.prepare_config_and_inputs() ( UpperCAmelCase__ ) : List[Any] = config_and_inputs UpperCAmelCase__ : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =(EsmForProteinFolding,) if is_torch_available() else () SCREAMING_SNAKE_CASE_ =() SCREAMING_SNAKE_CASE_ ={} if is_torch_available() else {} SCREAMING_SNAKE_CASE_ =False def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = EsmFoldModelTester(self ) UpperCAmelCase__ : Optional[Any] = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 ) def __a ( self : Union[str, Any] ): '''simple docstring''' self.config_tester.run_common_tests() def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) @unittest.skip("Does not support attention outputs" ) def __a ( self : Dict ): '''simple docstring''' pass @unittest.skip def __a ( self : Dict ): '''simple docstring''' pass @unittest.skip("Esm does not support embedding resizing" ) def __a ( self : Optional[int] ): '''simple docstring''' pass @unittest.skip("Esm does not support embedding resizing" ) def __a ( self : Dict ): '''simple docstring''' pass @unittest.skip("ESMFold does not support passing input embeds!" ) def __a ( self : int ): '''simple docstring''' pass @unittest.skip("ESMFold does not support head pruning." ) def __a ( self : Any ): '''simple docstring''' pass @unittest.skip("ESMFold does not support head pruning." ) def __a ( self : List[Any] ): '''simple docstring''' pass @unittest.skip("ESMFold does not support head pruning." ) def __a ( self : List[str] ): '''simple docstring''' pass @unittest.skip("ESMFold does not support head pruning." ) def __a ( self : Optional[int] ): '''simple docstring''' pass @unittest.skip("ESMFold does not support head pruning." ) def __a ( self : List[str] ): '''simple docstring''' pass @unittest.skip("ESMFold does not output hidden states in the normal way." ) def __a ( self : Dict ): '''simple docstring''' pass @unittest.skip("ESMfold does not output hidden states in the normal way." ) def __a ( self : List[Any] ): '''simple docstring''' pass @unittest.skip("ESMFold only has one output format." 
) def __a ( self : Optional[int] ): '''simple docstring''' pass @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality" ) def __a ( self : Any ): '''simple docstring''' pass @unittest.skip("ESMFold does not support input chunking." ) def __a ( self : Dict ): '''simple docstring''' pass @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments." ) def __a ( self : Any ): '''simple docstring''' pass @unittest.skip("ESMFold doesn't support torchscript compilation." ) def __a ( self : Tuple ): '''simple docstring''' pass @unittest.skip("ESMFold doesn't support torchscript compilation." ) def __a ( self : List[Any] ): '''simple docstring''' pass @unittest.skip("ESMFold doesn't support torchscript compilation." ) def __a ( self : Optional[Any] ): '''simple docstring''' pass @unittest.skip("ESMFold doesn't support data parallel." ) def __a ( self : List[str] ): '''simple docstring''' pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def __a ( self : Dict ): '''simple docstring''' pass @require_torch class lowerCAmelCase__ ( __magic_name__ ): @slow def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : str = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1" ).float() model.eval() UpperCAmelCase__ : Tuple = torch.tensor([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] ) UpperCAmelCase__ : Any = model(snake_case__ )["positions"] UpperCAmelCase__ : Tuple = torch.tensor([2.5828, 0.7993, -10.9334] , dtype=torch.floataa ) self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , snake_case__ , atol=1e-4 ) )
350
"""simple docstring""" import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def SCREAMING_SNAKE_CASE__ ( snake_case : Dataset , snake_case : Dict[str, str] )-> Any: '''simple docstring''' UpperCAmelCase__ : str = args.log_outputs UpperCAmelCase__ : str = "_".join(args.dataset.split("/" ) + [args.config, args.split] ) # load metric UpperCAmelCase__ : List[str] = load_metric("wer" ) UpperCAmelCase__ : Tuple = load_metric("cer" ) # compute metrics UpperCAmelCase__ : List[str] = wer.compute(references=result["target"] , predictions=result["prediction"] ) UpperCAmelCase__ : Tuple = cer.compute(references=result["target"] , predictions=result["prediction"] ) # print & log results UpperCAmelCase__ : Union[str, Any] = f'WER: {wer_result}\nCER: {cer_result}' print(snake_case ) with open(f'{dataset_id}_eval_results.txt' , "w" ) as f: f.write(snake_case ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: UpperCAmelCase__ : str = f'log_{dataset_id}_predictions.txt' UpperCAmelCase__ : List[str] = f'log_{dataset_id}_targets.txt' with open(snake_case , "w" ) as p, open(snake_case , "w" ) as t: # mapping function to write output def write_to_file(snake_case : List[Any] , snake_case : List[str] ): p.write(f'{i}' + "\n" ) p.write(batch["prediction"] + "\n" ) t.write(f'{i}' + "\n" ) t.write(batch["target"] + "\n" ) result.map(snake_case , with_indices=snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : str )-> str: '''simple docstring''' UpperCAmelCase__ : str = "[,?.!\-\;\:\"“%‘”�—’…–]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training UpperCAmelCase__ : str = re.sub(snake_case , "" , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! 
UpperCAmelCase__ : Tuple = ["\n\n", "\n", " ", " "] for t in token_sequences_to_ignore: UpperCAmelCase__ : List[Any] = " ".join(text.split(snake_case ) ) return text def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[int] = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor UpperCAmelCase__ : List[Any] = AutoFeatureExtractor.from_pretrained(args.model_id ) UpperCAmelCase__ : str = feature_extractor.sampling_rate # resample audio UpperCAmelCase__ : Dict = dataset.cast_column("audio" , Audio(sampling_rate=snake_case ) ) # load eval pipeline if args.device is None: UpperCAmelCase__ : List[str] = 0 if torch.cuda.is_available() else -1 UpperCAmelCase__ : Optional[int] = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(snake_case : Any ): UpperCAmelCase__ : List[str] = asr( batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) UpperCAmelCase__ : List[Any] = prediction["text"] UpperCAmelCase__ : Optional[int] = normalize_text(batch["sentence"] ) return batch # run inference on all examples UpperCAmelCase__ : Dict = dataset.map(snake_case , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(snake_case , snake_case ) if __name__ == "__main__": _lowerCAmelCase : Any = argparse.ArgumentParser() parser.add_argument( """--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers""" ) parser.add_argument( """--dataset""", type=str, required=True, help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""", ) parser.add_argument( """--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice""" ) parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""") parser.add_argument( """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds.""" ) parser.add_argument( """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second.""" ) parser.add_argument( """--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis.""" ) parser.add_argument( """--device""", type=int, default=None, help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""", ) _lowerCAmelCase : Tuple = parser.parse_args() main(args)
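# Hypothetical invocation of the evaluation script above (model and dataset
# identifiers are illustrative):
#
#   python eval.py --model_id facebook/wav2vec2-base-960h \
#       --dataset mozilla-foundation/common_voice_7_0 --config en \
#       --split test --log_outputs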
298
0
import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() UpperCAmelCase__ : Dict = logging.get_logger("""transformers.models.speecht5""") UpperCAmelCase__ : List[Any] = { """speech_encoder_prenet.layer_norm""": """speecht5.encoder.prenet.feature_projection.layer_norm""", """speech_encoder_prenet.post_extract_proj""": """speecht5.encoder.prenet.feature_projection.projection""", """speech_encoder_prenet.pos_conv.0""": """speecht5.encoder.prenet.pos_conv_embed.conv""", """speech_encoder_prenet.mask_emb""": """speecht5.encoder.prenet.masked_spec_embed""", } UpperCAmelCase__ : str = { """text_encoder_prenet.encoder_prenet.0""": """speecht5.encoder.prenet.embed_tokens""", """text_encoder_prenet.encoder_prenet.1.alpha""": """speecht5.encoder.prenet.encode_positions.alpha""", } UpperCAmelCase__ : List[str] = { """speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0""": """speecht5.decoder.prenet.layers.0""", """speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0""": """speecht5.decoder.prenet.layers.1""", """speech_decoder_prenet.decoder_prenet.0.1""": """speecht5.decoder.prenet.final_layer""", """speech_decoder_prenet.decoder_prenet.1.alpha""": """speecht5.decoder.prenet.encode_positions.alpha""", """speech_decoder_prenet.spkembs_layer.0""": """speecht5.decoder.prenet.speaker_embeds_layer""", } UpperCAmelCase__ : int = { """speech_decoder_postnet.feat_out""": """speech_decoder_postnet.feat_out""", """speech_decoder_postnet.prob_out""": """speech_decoder_postnet.prob_out""", """speech_decoder_postnet.postnet.postnet.0.0""": """speech_decoder_postnet.layers.0.conv""", """speech_decoder_postnet.postnet.postnet.0.1""": """speech_decoder_postnet.layers.0.batch_norm""", """speech_decoder_postnet.postnet.postnet.1.0""": """speech_decoder_postnet.layers.1.conv""", """speech_decoder_postnet.postnet.postnet.1.1""": """speech_decoder_postnet.layers.1.batch_norm""", """speech_decoder_postnet.postnet.postnet.2.0""": """speech_decoder_postnet.layers.2.conv""", """speech_decoder_postnet.postnet.postnet.2.1""": """speech_decoder_postnet.layers.2.batch_norm""", """speech_decoder_postnet.postnet.postnet.3.0""": """speech_decoder_postnet.layers.3.conv""", """speech_decoder_postnet.postnet.postnet.3.1""": """speech_decoder_postnet.layers.3.batch_norm""", """speech_decoder_postnet.postnet.postnet.4.0""": """speech_decoder_postnet.layers.4.conv""", """speech_decoder_postnet.postnet.postnet.4.1""": """speech_decoder_postnet.layers.4.batch_norm""", } UpperCAmelCase__ : Union[str, Any] = { """text_decoder_prenet.embed_tokens""": """speecht5.decoder.prenet.embed_tokens""", } UpperCAmelCase__ : Union[str, Any] = { """text_decoder_postnet.output_projection""": """text_decoder_postnet.lm_head""", } UpperCAmelCase__ : Tuple = { """encoder.layers.*.self_attn.k_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj""", """encoder.layers.*.self_attn.v_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj""", """encoder.layers.*.self_attn.q_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj""", """encoder.layers.*.self_attn.out_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj""", """encoder.layers.*.self_attn_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.layer_norm""", 
"""encoder.layers.*.fc1""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense""", """encoder.layers.*.fc2""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense""", """encoder.layers.*.final_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """speecht5.encoder.wrapped_encoder.layer_norm""", """encoder.pos_emb.pe_k""": """speecht5.encoder.wrapped_encoder.embed_positions.pe_k""", } UpperCAmelCase__ : Union[str, Any] = { """decoder.layers.*.self_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj""", """decoder.layers.*.self_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj""", """decoder.layers.*.self_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj""", """decoder.layers.*.self_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj""", """decoder.layers.*.self_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm""", """decoder.layers.*.encoder_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj""", """decoder.layers.*.encoder_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj""", """decoder.layers.*.encoder_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj""", """decoder.layers.*.encoder_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj""", """decoder.layers.*.encoder_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm""", """decoder.layers.*.fc1""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense""", """decoder.layers.*.fc2""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense""", """decoder.layers.*.final_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm""", } UpperCAmelCase__ : List[Any] = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } UpperCAmelCase__ : Union[str, Any] = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } UpperCAmelCase__ : Optional[Any] = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } UpperCAmelCase__ : Union[str, Any] = [] UpperCAmelCase__ : str = [ """encoder.version""", """encoder.layers.*.norm_k.weight""", """encoder.layers.*.norm_k.bias""", """decoder.version""", """decoder.layers.*.norm_k.weight""", """decoder.layers.*.norm_k.bias""", """decoder.pos_emb.pe_k""", """speech_encoder_prenet.embed_positions._float_tensor""", """text_decoder_prenet.embed_positions._float_tensor""", ] UpperCAmelCase__ : Any = IGNORE_KEYS + [ """encoder.proj""", """text_encoder_prenet.*""", """speech_decoder_prenet.*""", """speech_decoder_postnet.*""", ] UpperCAmelCase__ : List[str] = IGNORE_KEYS + [ """encoder.proj""", """speech_encoder_prenet.*""", """text_decoder_prenet.*""", """text_decoder_postnet.*""", ] UpperCAmelCase__ : Optional[int] = IGNORE_KEYS + [ """encoder.proj""", """text_encoder_prenet.*""", """text_decoder_prenet.*""", """text_decoder_postnet.*""", ] def __lowercase ( _A , _A , _A , _A , _A ) -> Optional[int]: for attribute in key.split(""".""" ): SCREAMING_SNAKE_CASE : str = getattr(_A , _A ) if weight_type is not None: 
SCREAMING_SNAKE_CASE : str = getattr(_A , _A ).shape else: SCREAMING_SNAKE_CASE : int = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" F" {value.shape} for {full_name}" ) if weight_type == "weight": SCREAMING_SNAKE_CASE : Dict = value elif weight_type == "weight_g": SCREAMING_SNAKE_CASE : Optional[Any] = value elif weight_type == "weight_v": SCREAMING_SNAKE_CASE : List[Any] = value elif weight_type == "bias": SCREAMING_SNAKE_CASE : int = value elif weight_type == "running_mean": SCREAMING_SNAKE_CASE : Tuple = value elif weight_type == "running_var": SCREAMING_SNAKE_CASE : List[str] = value elif weight_type == "num_batches_tracked": SCREAMING_SNAKE_CASE : int = value else: SCREAMING_SNAKE_CASE : Optional[Any] = value logger.info(F"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}." ) def __lowercase ( _A , _A ) -> List[str]: for key in ignore_keys: if key.endswith(""".*""" ): if name.startswith(key[:-1] ): return True elif ".*." in key: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = key.split(""".*.""" ) if prefix in name and suffix in name: return True elif key in name: return True return False def __lowercase ( _A , _A , _A ) -> int: SCREAMING_SNAKE_CASE : Optional[Any] = [] if task == "s2t": SCREAMING_SNAKE_CASE : List[Any] = hf_model.speechta.encoder.prenet.feature_encoder SCREAMING_SNAKE_CASE : int = MAPPING_S2T SCREAMING_SNAKE_CASE : Tuple = IGNORE_KEYS_S2T elif task == "t2s": SCREAMING_SNAKE_CASE : Union[str, Any] = None SCREAMING_SNAKE_CASE : List[str] = MAPPING_T2S SCREAMING_SNAKE_CASE : Optional[Any] = IGNORE_KEYS_T2S elif task == "s2s": SCREAMING_SNAKE_CASE : Optional[Any] = hf_model.speechta.encoder.prenet.feature_encoder SCREAMING_SNAKE_CASE : List[Any] = MAPPING_S2S SCREAMING_SNAKE_CASE : Tuple = IGNORE_KEYS_S2S else: raise ValueError(F"Unsupported task: {task}" ) for name, value in fairseq_dict.items(): if should_ignore(_A , _A ): logger.info(F"{name} was ignored" ) continue SCREAMING_SNAKE_CASE : Optional[int] = False if "conv_layers" in name: load_conv_layer( _A , _A , _A , _A , hf_model.config.feat_extract_norm == """group""" , ) SCREAMING_SNAKE_CASE : List[str] = True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." 
+ mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = key.split(""".*.""" ) if prefix in name and suffix in name: SCREAMING_SNAKE_CASE : int = suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: SCREAMING_SNAKE_CASE : List[str] = True if "*" in mapped_key: SCREAMING_SNAKE_CASE : List[Any] = name.split(_A )[0].split(""".""" )[-2] SCREAMING_SNAKE_CASE : Tuple = mapped_key.replace("""*""" , _A ) if "weight_g" in name: SCREAMING_SNAKE_CASE : Optional[Any] = """weight_g""" elif "weight_v" in name: SCREAMING_SNAKE_CASE : List[Any] = """weight_v""" elif "bias" in name: SCREAMING_SNAKE_CASE : Optional[int] = """bias""" elif "weight" in name: SCREAMING_SNAKE_CASE : str = """weight""" elif "running_mean" in name: SCREAMING_SNAKE_CASE : str = """running_mean""" elif "running_var" in name: SCREAMING_SNAKE_CASE : Tuple = """running_var""" elif "num_batches_tracked" in name: SCREAMING_SNAKE_CASE : int = """num_batches_tracked""" else: SCREAMING_SNAKE_CASE : Optional[Any] = None set_recursively(_A , _A , _A , _A , _A ) continue if not is_used: unused_weights.append(_A ) logger.warning(F"Unused weights: {unused_weights}" ) def __lowercase ( _A , _A , _A , _A , _A ) -> Optional[int]: SCREAMING_SNAKE_CASE : Dict = full_name.split("""conv_layers.""" )[-1] SCREAMING_SNAKE_CASE : Union[str, Any] = name.split(""".""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = int(items[0] ) SCREAMING_SNAKE_CASE : Optional[Any] = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) SCREAMING_SNAKE_CASE : Any = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) SCREAMING_SNAKE_CASE : Optional[int] = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." ) SCREAMING_SNAKE_CASE : int = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." ) SCREAMING_SNAKE_CASE : str = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." 
) else: unused_weights.append(_A ) @torch.no_grad() def __lowercase ( _A , _A , _A , _A=None , _A=None , _A=None , ) -> Tuple: if config_path is not None: SCREAMING_SNAKE_CASE : Dict = SpeechTaConfig.from_pretrained(_A ) else: SCREAMING_SNAKE_CASE : List[str] = SpeechTaConfig() if task == "s2t": SCREAMING_SNAKE_CASE : Any = config.max_text_positions SCREAMING_SNAKE_CASE : Any = SpeechTaForSpeechToText(_A ) elif task == "t2s": SCREAMING_SNAKE_CASE : List[Any] = 1876 SCREAMING_SNAKE_CASE : Optional[int] = 600 SCREAMING_SNAKE_CASE : Any = config.max_speech_positions SCREAMING_SNAKE_CASE : Union[str, Any] = SpeechTaForTextToSpeech(_A ) elif task == "s2s": SCREAMING_SNAKE_CASE : Optional[int] = 1876 SCREAMING_SNAKE_CASE : Optional[Any] = config.max_speech_positions SCREAMING_SNAKE_CASE : List[Any] = SpeechTaForSpeechToSpeech(_A ) else: raise ValueError(F"Unknown task name: {task}" ) if vocab_path: SCREAMING_SNAKE_CASE : List[Any] = SpeechTaTokenizer(_A , model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. include the space before it SCREAMING_SNAKE_CASE : int = AddedToken("""<mask>""" , lstrip=_A , rstrip=_A ) SCREAMING_SNAKE_CASE : Optional[int] = mask_token tokenizer.add_special_tokens({"""mask_token""": mask_token} ) tokenizer.add_tokens(["""<ctc_blank>"""] ) SCREAMING_SNAKE_CASE : Tuple = SpeechTaFeatureExtractor() SCREAMING_SNAKE_CASE : Union[str, Any] = SpeechTaProcessor(tokenizer=_A , feature_extractor=_A ) processor.save_pretrained(_A ) SCREAMING_SNAKE_CASE : List[str] = torch.load(_A ) recursively_load_weights(fairseq_checkpoint["""model"""] , _A , _A ) model.save_pretrained(_A ) if repo_id: print("""Pushing to the hub...""" ) processor.push_to_hub(_A ) model.push_to_hub(_A ) if __name__ == "__main__": UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser() parser.add_argument( """--task""", default="""s2t""", type=str, help="""Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.""", ) parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--vocab_path""", default=None, type=str, help="""Path to SentencePiece model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub.""" ) UpperCAmelCase__ : Optional[int] = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
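# --- Illustrative sketch (not part of the conversion script above) ---
# A minimal, self-contained demonstration of how the "*" wildcard entries in
# the mapping tables are resolved when loading weights: the pattern is split
# on ".*.", the layer index is recovered from the fairseq parameter name, and
# "*" in the Hugging Face name is replaced with that index. The parameter
# name below is hypothetical.
fairseq_name = "encoder.layers.3.self_attn.k_proj.weight"
pattern = "encoder.layers.*.self_attn.k_proj"
hf_pattern = "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj"

prefix, suffix = pattern.split(".*.")
if prefix in fairseq_name and suffix in fairseq_name:
    # same index-recovery expression as in the weight-loading loop above
    layer_index = fairseq_name.split(suffix)[0].split(".")[-2]
    hf_name = hf_pattern.replace("*", layer_index)
    print(hf_name)  # speecht5.encoder.wrapped_encoder.layers.3.attention.k_proj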
from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeqaSeqLM, TFAutoModelForSpeechSeqaSeq, TFAutoModelForVisionaSeq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class a__ ( unittest.TestCase ): """simple docstring""" def _lowercase ( self : Union[str, Any] ) ->List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Dict = tf.convert_to_tensor( [ [ 8.2_22_09_91, # 3rd highest value; idx. 0 -0.5_62_00_44, 5.23_22_97_52, 4.0_38_63_93, -6.8_79_83_78, -0.54_78_58_02, -3.2_01_21_53, 2.92_77_71_76, 1.88_17_19_53, 7.35_34_12_76, # 5th highest value; idx. 9 8.43_20_78_33, # 2nd highest value; idx. 10 -9.85_71_18_36, -5.96_20_92_36, -1.13_03_91_61, -7.1_11_52_94, -0.8_36_96_33, -5.3_18_64_08, 7.06_42_74_07, 0.81_36_93_44, -0.82_02_38_17, -5.9_17_97_96, 0.58_81_34_43, -6.99_77_84_38, 4.71_55_11_89, -0.18_77_16_37, 7.44_02_07_59, # 4th highest value; idx. 25 9.38_45_09_87, # 1st highest value; idx. 26 2.12_66_29_41, -9.32_56_20_38, 2.35_65_25_22, ], # cummulative prob of 5 highest values <= 0.6 [ 0.58_42_55_18, 4.53_13_92_38, -5.57_51_04_64, -6.28_03_06_99, -7.19_52_95_03, -4.02_12_25_51, 1.39_33_70_37, -6.06_70_70_57, 1.59_48_05_17, -9.64_31_19, 0.03_90_77_99, 0.67_23_17_62, -8.88_20_67_26, 6.27_11_59_22, # 4th highest value; idx. 13 2.28_52_07_23, 4.82_76_75_06, 4.30_42_13_68, 8.8_27_53_13, # 2nd highest value; idx. 17 5.44_02_99_58, # 5th highest value; idx. 18 -4.4_73_57_94, 7.38_57_95_36, # 3rd highest value; idx. 20 -2.91_05_16_63, 2.61_94_60_77, -2.5_67_47_62, -9.48_95_93_02, -4.02_92_26_45, -1.35_41_69_18, 9.67_70_23_23, # 1st highest value; idx. 
27 -5.89_47_85_53, 1.85_37_04_67, ], # cummulative prob of 5 highest values <= 0.6 ] , dtype=tf.floataa , ) SCREAMING_SNAKE_CASE : List[Any] = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 1_0], [0, 2_5], [0, 2_6], [1, 1_3], [1, 1_7], [1, 1_8], [1, 2_0], [1, 2_7]] , dtype=tf.intaa , ) # expected non filtered idx as noted above SCREAMING_SNAKE_CASE : Optional[int] = tf.convert_to_tensor( [8.22_20_99, 7.3_53_41_26, 8.43_20_78, 7.4_40_20_75, 9.3_84_51, 6.27_11_59, 8.82_75_31, 5.4_40_29_95, 7.3_85_79_56, 9.67_70_23] , dtype=tf.floataa , ) # expected non filtered values as noted above SCREAMING_SNAKE_CASE : Dict = tf_top_k_top_p_filtering(UpperCAmelCase__ , top_k=1_0 , top_p=0.6 , min_tokens_to_keep=4 ) SCREAMING_SNAKE_CASE : str = output[output != -float("""inf""" )] SCREAMING_SNAKE_CASE : Optional[int] = tf.cast( tf.where(tf.not_equal(UpperCAmelCase__ , tf.constant(-float("""inf""" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , ) tf.debugging.assert_near(UpperCAmelCase__ , UpperCAmelCase__ , rtol=1e-12 ) tf.debugging.assert_equal(UpperCAmelCase__ , UpperCAmelCase__ ) @require_tf class a__ ( unittest.TestCase , UpperCAmelCase ): """simple docstring""" if is_tf_available(): UpperCAmelCase__ : Optional[Any] ={ """AutoModelForCausalLM""": TFAutoModelForCausalLM, """AutoModelForSpeechSeq2Seq""": TFAutoModelForSpeechSeqaSeq, """AutoModelForSeq2SeqLM""": TFAutoModelForSeqaSeqLM, """AutoModelForVision2Seq""": TFAutoModelForVisionaSeq, """LogitsProcessorList""": TFLogitsProcessorList, """MinLengthLogitsProcessor""": TFMinLengthLogitsProcessor, """create_tensor_fn""": tf.convert_to_tensor, """floats_tensor""": floats_tensor, """return_tensors""": """tf""", } @slow def _lowercase ( self : int ) ->List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) SCREAMING_SNAKE_CASE : str = 2 SCREAMING_SNAKE_CASE : Tuple = 2 class a__ ( tf.Module ): """simple docstring""" def __init__( self : Tuple , UpperCAmelCase__ : Optional[int] ) ->str: """simple docstring""" super(UpperCAmelCase__ , self ).__init__() SCREAMING_SNAKE_CASE : Optional[int] = model @tf.function( input_signature=( tf.TensorSpec((None, input_length) , tf.intaa , name="""input_ids""" ), tf.TensorSpec((None, input_length) , tf.intaa , name="""attention_mask""" ), ) , jit_compile=UpperCAmelCase__ , ) def _lowercase ( self : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] ) ->List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = self.model.generate( input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ , return_dict_in_generate=UpperCAmelCase__ , ) return {"sequences": outputs["sequences"]} SCREAMING_SNAKE_CASE : Any = [[2, 0], [1_0_2, 1_0_3]] SCREAMING_SNAKE_CASE : Tuple = [[1, 0], [1, 1]] SCREAMING_SNAKE_CASE : Dict = DummyModel(model=UpperCAmelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(UpperCAmelCase__ , UpperCAmelCase__ , signatures={"""serving_default""": dummy_model.serving} ) SCREAMING_SNAKE_CASE : Optional[int] = tf.saved_model.load(UpperCAmelCase__ ).signatures["""serving_default"""] for batch_size in range(1 , len(UpperCAmelCase__ ) + 1 ): SCREAMING_SNAKE_CASE : int = { """input_ids""": tf.constant(dummy_input_ids[:batch_size] ), """attention_mask""": tf.constant(dummy_attention_masks[:batch_size] ), } SCREAMING_SNAKE_CASE : Tuple = serving_func(**UpperCAmelCase__ )["""sequences"""] SCREAMING_SNAKE_CASE : List[str] = 
test_model.generate(**UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ ) tf.debugging.assert_equal(UpperCAmelCase__ , UpperCAmelCase__ ) @slow def _lowercase ( self : Dict ) ->int: """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) SCREAMING_SNAKE_CASE : Any = 1 SCREAMING_SNAKE_CASE : int = 2 class a__ ( tf.Module ): """simple docstring""" def __init__( self : List[Any] , UpperCAmelCase__ : Optional[Any] ) ->Optional[int]: """simple docstring""" super(UpperCAmelCase__ , self ).__init__() SCREAMING_SNAKE_CASE : List[str] = model @tf.function( input_signature=( tf.TensorSpec((batch_size, None) , tf.intaa , name="""input_ids""" ), tf.TensorSpec((batch_size, None) , tf.intaa , name="""attention_mask""" ), ) , jit_compile=UpperCAmelCase__ , ) def _lowercase ( self : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str ) ->Any: """simple docstring""" SCREAMING_SNAKE_CASE : int = self.model.generate( input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ , return_dict_in_generate=UpperCAmelCase__ , ) return {"sequences": outputs["sequences"]} SCREAMING_SNAKE_CASE : List[Any] = [[2], [1_0_2, 1_0_3]] SCREAMING_SNAKE_CASE : List[Any] = [[1], [1, 1]] SCREAMING_SNAKE_CASE : Union[str, Any] = DummyModel(model=UpperCAmelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(UpperCAmelCase__ , UpperCAmelCase__ , signatures={"""serving_default""": dummy_model.serving} ) SCREAMING_SNAKE_CASE : int = tf.saved_model.load(UpperCAmelCase__ ).signatures["""serving_default"""] for input_row in range(len(UpperCAmelCase__ ) ): SCREAMING_SNAKE_CASE : str = { """input_ids""": tf.constant([dummy_input_ids[input_row]] ), """attention_mask""": tf.constant([dummy_attention_masks[input_row]] ), } SCREAMING_SNAKE_CASE : List[str] = serving_func(**UpperCAmelCase__ )["""sequences"""] SCREAMING_SNAKE_CASE : List[Any] = test_model.generate(**UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ ) tf.debugging.assert_equal(UpperCAmelCase__ , UpperCAmelCase__ ) @slow @require_tensorflow_text def _lowercase ( self : Optional[Any] ) ->Tuple: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id="""google/flan-t5-small""" , filename="""spiece.model""" , local_dir=UpperCAmelCase__ ) class a__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : Optional[Any] ) ->List[str]: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE : Any = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(UpperCAmelCase__ , """spiece.model""" ) , """rb""" ).read() ) SCREAMING_SNAKE_CASE : Dict = TFAutoModelForSeqaSeqLM.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) def _lowercase ( self : int , UpperCAmelCase__ : Any , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : str ) ->int: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer.tokenize(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = text.pad_model_inputs( UpperCAmelCase__ , max_seq_length=6_4 , pad_value=self.model.config.pad_token_id ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.model.generate(input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ ) return self.tokenizer.detokenize(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : str = CompleteSentenceTransformer() SCREAMING_SNAKE_CASE : Tuple = tf.keras.layers.Input(shape=(1,) , dtype=tf.string 
, name="""inputs""" ) SCREAMING_SNAKE_CASE : str = complete_model(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : Optional[int] = tf.keras.Model(UpperCAmelCase__ , UpperCAmelCase__ ) keras_model.save(UpperCAmelCase__ ) def _lowercase ( self : Optional[Any] ) ->List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = { """do_sample""": True, """num_beams""": 1, """top_p""": 0.7, """top_k""": 1_0, """temperature""": 0.7, } SCREAMING_SNAKE_CASE : Tuple = 1_4 SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) SCREAMING_SNAKE_CASE : List[Any] = """Hello, my dog is cute and""" SCREAMING_SNAKE_CASE : Tuple = tokenizer(UpperCAmelCase__ , return_tensors="""tf""" ) SCREAMING_SNAKE_CASE : Optional[int] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) SCREAMING_SNAKE_CASE : Dict = 6_3_8 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(""":/CPU:0""" ): tf.random.set_seed(0 ) SCREAMING_SNAKE_CASE : int = model.generate(**UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ ) self.assertTrue(expectation == len(generated_tokens[0] ) ) SCREAMING_SNAKE_CASE : Dict = [6_3_8, 1_9_8] with tf.device(""":/CPU:0""" ): tf.random.set_seed(0 ) SCREAMING_SNAKE_CASE : Dict = model.generate(**UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ ) self.assertTrue(expectation == len(generated_tokens[0] ) ) def _lowercase ( self : str ) ->List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bart""" ) SCREAMING_SNAKE_CASE : List[Any] = """Hugging Face is a technology company based in New York and Paris.""" SCREAMING_SNAKE_CASE : Optional[int] = bart_tokenizer(UpperCAmelCase__ , return_tensors="""tf""" ).input_ids SCREAMING_SNAKE_CASE : int = TFBartForConditionalGeneration.from_pretrained("""hf-internal-testing/tiny-random-bart""" ) SCREAMING_SNAKE_CASE : Optional[int] = bart_model.generate(UpperCAmelCase__ ).numpy() class a__ ( UpperCAmelCase ): """simple docstring""" def _lowercase ( self : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict=None , **UpperCAmelCase__ : Dict ) ->List[str]: """simple docstring""" return super().call(UpperCAmelCase__ , **UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : Optional[int] = FakeBart.from_pretrained("""hf-internal-testing/tiny-random-bart""" ) SCREAMING_SNAKE_CASE : Optional[int] = bart_model.generate(UpperCAmelCase__ , foo="""bar""" ).numpy() self.assertTrue(np.array_equal(UpperCAmelCase__ , UpperCAmelCase__ ) ) class a__ ( bart_model.model.encoder.__class__ ): """simple docstring""" def _lowercase ( self : List[Any] , UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Optional[int] ) ->Union[str, Any]: """simple docstring""" return super().call(UpperCAmelCase__ , **UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : Optional[Any] = FakeEncoder(bart_model.config , bart_model.model.shared ) SCREAMING_SNAKE_CASE : Tuple = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) SCREAMING_SNAKE_CASE : Tuple = bart_model.generate(UpperCAmelCase__ ).numpy() with self.assertRaises(UpperCAmelCase__ ): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(UpperCAmelCase__ , foo="""bar""" )
'''simple docstring''' import gzip import hashlib import json import multiprocessing import os import re import shutil import time from pathlib import Path import numpy as np from arguments import PreprocessingArguments from datasets import load_dataset from minhash_deduplication import deduplicate_dataset from transformers import AutoTokenizer, HfArgumentParser lowerCAmelCase__ : Tuple = re.compile(R"\s+") def __UpperCamelCase ( _UpperCAmelCase ): return {"hash": hashlib.mda(re.sub(_UpperCAmelCase, "", example["content"] ).encode("utf-8" ) ).hexdigest()} def __UpperCamelCase ( _UpperCAmelCase ): __UpperCAmelCase : Optional[int] = [len(_UpperCAmelCase ) for line in example["content"].splitlines()] return {"line_mean": np.mean(_UpperCAmelCase ), "line_max": max(_UpperCAmelCase )} def __UpperCamelCase ( _UpperCAmelCase ): __UpperCAmelCase : int = np.mean([c.isalnum() for c in example["content"]] ) return {"alpha_frac": alpha_frac} def __UpperCamelCase ( _UpperCAmelCase, _UpperCAmelCase ): if example["hash"] in uniques: uniques.remove(example["hash"] ) return True else: return False def __UpperCamelCase ( _UpperCAmelCase, _UpperCAmelCase=5 ): __UpperCAmelCase : Optional[Any] = ["auto-generated", "autogenerated", "automatically generated"] __UpperCAmelCase : Optional[Any] = example["content"].splitlines() for _, line in zip(range(_UpperCAmelCase ), _UpperCAmelCase ): for keyword in keywords: if keyword in line.lower(): return {"autogenerated": True} else: return {"autogenerated": False} def __UpperCamelCase ( _UpperCAmelCase, _UpperCAmelCase=5, _UpperCAmelCase=0.05 ): __UpperCAmelCase : Union[str, Any] = ["unit tests", "test file", "configuration file"] __UpperCAmelCase : List[str] = example["content"].splitlines() __UpperCAmelCase : List[Any] = 0 __UpperCAmelCase : Optional[int] = 0 # first test for _, line in zip(range(_UpperCAmelCase ), _UpperCAmelCase ): for keyword in keywords: if keyword in line.lower(): return {"config_or_test": True} # second test __UpperCAmelCase : Union[str, Any] = example["content"].count("\n" ) __UpperCAmelCase : List[Any] = int(coeff * nlines ) for line in lines: count_config += line.lower().count("config" ) count_test += line.lower().count("test" ) if count_config > threshold or count_test > threshold: return {"config_or_test": True} return {"config_or_test": False} def __UpperCamelCase ( _UpperCAmelCase ): __UpperCAmelCase : List[str] = ["def ", "class ", "for ", "while "] __UpperCAmelCase : Tuple = example["content"].splitlines() for line in lines: for keyword in keywords: if keyword in line.lower(): return {"has_no_keywords": False} return {"has_no_keywords": True} def __UpperCamelCase ( _UpperCAmelCase, _UpperCAmelCase=4 ): __UpperCAmelCase : Dict = example["content"].splitlines() __UpperCAmelCase : Union[str, Any] = 0 for line in lines: counter += line.lower().count("=" ) if counter > minimum: return {"has_few_assignments": False} return {"has_few_assignments": True} def __UpperCamelCase ( _UpperCAmelCase ): __UpperCAmelCase : Tuple = tokenizer(example["content"], truncation=_UpperCAmelCase )["input_ids"] __UpperCAmelCase : Dict = len(example["content"] ) / len(_UpperCAmelCase ) return {"ratio": ratio} def __UpperCamelCase ( _UpperCAmelCase ): __UpperCAmelCase : int = {} results.update(get_hash(_UpperCAmelCase ) ) results.update(line_stats(_UpperCAmelCase ) ) results.update(alpha_stats(_UpperCAmelCase ) ) results.update(char_token_ratio(_UpperCAmelCase ) ) results.update(is_autogenerated(_UpperCAmelCase ) ) results.update(is_config_or_test(_UpperCAmelCase ) 
) results.update(has_no_keywords(_UpperCAmelCase ) ) results.update(has_few_assignments(_UpperCAmelCase ) ) return results def __UpperCamelCase ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ): if not check_uniques(_UpperCAmelCase, _UpperCAmelCase ): return False elif example["autogenerated"]: return False elif example["line_max"] > args.line_max: return False elif example["line_mean"] > args.line_mean: return False elif example["alpha_frac"] < args.alpha_frac: return False elif example["ratio"] < args.min_token_ratio: return False elif example["config_or_test"] and np.random.rand() <= args.filter_proba: return False elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba: return False elif example["has_few_assignments"]: return False else: return True def __UpperCamelCase ( _UpperCAmelCase ): with open(_UpperCAmelCase, "rb" ) as f_in: with gzip.open(str(_UpperCAmelCase ) + ".gz", "wb", compresslevel=6 ) as f_out: shutil.copyfileobj(_UpperCAmelCase, _UpperCAmelCase ) os.unlink(_UpperCAmelCase ) # Settings lowerCAmelCase__ : List[Any] = HfArgumentParser(PreprocessingArguments) lowerCAmelCase__ : str = parser.parse_args() if args.num_workers is None: lowerCAmelCase__ : Optional[int] = multiprocessing.cpu_count() lowerCAmelCase__ : Dict = AutoTokenizer.from_pretrained(args.tokenizer_dir) # Load dataset lowerCAmelCase__ : List[Any] = time.time() lowerCAmelCase__ : Dict = load_dataset(args.dataset_name, split="train") print(f"Time to load dataset: {time.time()-t_start:.2f}") # Run preprocessing lowerCAmelCase__ : Tuple = time.time() lowerCAmelCase__ : Tuple = ds.map(preprocess, num_proc=args.num_workers) print(f"Time to preprocess dataset: {time.time()-t_start:.2f}") # Deduplicate hashes lowerCAmelCase__ : Any = set(ds.unique("hash")) lowerCAmelCase__ : Optional[int] = len(uniques) / len(ds) print(f"Fraction of duplicates: {1-frac:.2%}") # Deduplicate data and apply heuristics lowerCAmelCase__ : Tuple = time.time() lowerCAmelCase__ : List[Any] = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args}) print(f"Time to filter dataset: {time.time()-t_start:.2f}") print(f"Size of filtered dataset: {len(ds_filter)}") # Deduplicate with minhash and jaccard similarity if args.near_deduplication: lowerCAmelCase__ : Dict = time.time() lowerCAmelCase__ , lowerCAmelCase__ : Any = deduplicate_dataset(ds_filter, args.jaccard_threshold) print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}") print(f"Size of deduplicate dataset: {len(ds_filter)}") # Save data in batches of samples_per_file lowerCAmelCase__ : Optional[int] = Path(args.output_dir) output_dir.mkdir(exist_ok=True) # save duplicate_clusters in the output_dir as artifacts # not sure it is the right place the save it if args.near_deduplication: with open(output_dir / "duplicate_clusters.json", "w") as f: json.dump(duplicate_clusters, f) lowerCAmelCase__ : Any = output_dir / "data" data_dir.mkdir(exist_ok=True) lowerCAmelCase__ : Union[str, Any] = time.time() for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)): lowerCAmelCase__ : Union[str, Any] = str(data_dir / f"file-{file_number+1:012}.json") lowerCAmelCase__ : Any = min(len(ds_filter), index + args.samples_per_file) ds_filter.select(list(range(index, end_index))).to_json(file_path) compress_file(file_path) print(f"Time to save dataset: {time.time()-t_start:.2f}")
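# --- Illustrative sketch (standard-library names, hypothetical data) ---
# The exact-deduplication trick used by get_hash/check_uniques above in
# miniature: hash every example once, collect the set of unique hashes, then
# keep only the first example seen for each hash by removing the hash from
# the set as soon as it is encountered.
import hashlib

examples = [{"content": "print(1)"}, {"content": "print(2)"}, {"content": "print(1)"}]
hashes = [hashlib.md5(ex["content"].encode("utf-8")).hexdigest() for ex in examples]
uniques = set(hashes)

kept = []
for ex, h in zip(examples, hashes):
    if h in uniques:        # first occurrence: keep it and retire its hash
        uniques.remove(h)
        kept.append(ex)

print(len(kept))  # -> 2 (the duplicate third example is dropped)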
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block @dataclass class SCREAMING_SNAKE_CASE__ ( snake_case__ ): """simple docstring""" SCREAMING_SNAKE_CASE = 42 class SCREAMING_SNAKE_CASE__ ( snake_case__ ,snake_case__ ): """simple docstring""" @register_to_config def __init__( self : Union[str, Any] , UpperCAmelCase_ : int = 65_536 , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : int = 2 , UpperCAmelCase_ : int = 2 , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : str = "fourier" , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , UpperCAmelCase_ : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , UpperCAmelCase_ : Tuple[str] = "UNetMidBlock1D" , UpperCAmelCase_ : str = None , UpperCAmelCase_ : Tuple[int] = (32, 32, 64) , UpperCAmelCase_ : str = None , UpperCAmelCase_ : int = 8 , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : bool = False , ): """simple docstring""" super().__init__() __UpperCAmelCase : str = sample_size # time if time_embedding_type == "fourier": __UpperCAmelCase : int = GaussianFourierProjection( embedding_size=8 , set_W_to_weight=UpperCAmelCase_ , log=UpperCAmelCase_ , flip_sin_to_cos=UpperCAmelCase_ ) __UpperCAmelCase : str = 2 * block_out_channels[0] elif time_embedding_type == "positional": __UpperCAmelCase : str = Timesteps( block_out_channels[0] , flip_sin_to_cos=UpperCAmelCase_ , downscale_freq_shift=UpperCAmelCase_ ) __UpperCAmelCase : Dict = block_out_channels[0] if use_timestep_embedding: __UpperCAmelCase : Union[str, Any] = block_out_channels[0] * 4 __UpperCAmelCase : str = TimestepEmbedding( in_channels=UpperCAmelCase_ , time_embed_dim=UpperCAmelCase_ , act_fn=UpperCAmelCase_ , out_dim=block_out_channels[0] , ) __UpperCAmelCase : Tuple = nn.ModuleList([] ) __UpperCAmelCase : int = None __UpperCAmelCase : Optional[Any] = nn.ModuleList([] ) __UpperCAmelCase : Dict = None # down __UpperCAmelCase : str = in_channels for i, down_block_type in enumerate(UpperCAmelCase_ ): __UpperCAmelCase : Optional[Any] = output_channel __UpperCAmelCase : Optional[int] = block_out_channels[i] if i == 0: input_channel += extra_in_channels __UpperCAmelCase : Tuple = i == len(UpperCAmelCase_ ) - 1 __UpperCAmelCase : List[str] = get_down_block( UpperCAmelCase_ , num_layers=UpperCAmelCase_ , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , ) self.down_blocks.append(UpperCAmelCase_ ) # mid __UpperCAmelCase : Optional[Any] = get_mid_block( UpperCAmelCase_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=UpperCAmelCase_ , add_downsample=UpperCAmelCase_ , ) # up __UpperCAmelCase : Tuple = list(reversed(UpperCAmelCase_ ) ) __UpperCAmelCase : Any = reversed_block_out_channels[0] if out_block_type is None: __UpperCAmelCase : Union[str, Any] = out_channels else: __UpperCAmelCase : Dict = block_out_channels[0] for i, up_block_type in 
enumerate(UpperCAmelCase_ ): __UpperCAmelCase : int = output_channel __UpperCAmelCase : str = ( reversed_block_out_channels[i + 1] if i < len(UpperCAmelCase_ ) - 1 else final_upsample_channels ) __UpperCAmelCase : Tuple = i == len(UpperCAmelCase_ ) - 1 __UpperCAmelCase : Dict = get_up_block( UpperCAmelCase_ , num_layers=UpperCAmelCase_ , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , ) self.up_blocks.append(UpperCAmelCase_ ) __UpperCAmelCase : Union[str, Any] = output_channel # out __UpperCAmelCase : Optional[int] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 ) __UpperCAmelCase : List[Any] = get_out_block( out_block_type=UpperCAmelCase_ , num_groups_out=UpperCAmelCase_ , embed_dim=block_out_channels[0] , out_channels=UpperCAmelCase_ , act_fn=UpperCAmelCase_ , fc_dim=block_out_channels[-1] // 4 , ) def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase_ : torch.FloatTensor , UpperCAmelCase_ : Union[torch.Tensor, float, int] , UpperCAmelCase_ : bool = True , ): """simple docstring""" __UpperCAmelCase : Dict = timestep if not torch.is_tensor(UpperCAmelCase_ ): __UpperCAmelCase : List[str] = torch.tensor([timesteps] , dtype=torch.long , device=sample.device ) elif torch.is_tensor(UpperCAmelCase_ ) and len(timesteps.shape ) == 0: __UpperCAmelCase : List[str] = timesteps[None].to(sample.device ) __UpperCAmelCase : List[str] = self.time_proj(UpperCAmelCase_ ) if self.config.use_timestep_embedding: __UpperCAmelCase : Any = self.time_mlp(UpperCAmelCase_ ) else: __UpperCAmelCase : Any = timestep_embed[..., None] __UpperCAmelCase : int = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype ) __UpperCAmelCase : Dict = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) ) # 2. down __UpperCAmelCase : int = () for downsample_block in self.down_blocks: __UpperCAmelCase , __UpperCAmelCase : int = downsample_block(hidden_states=UpperCAmelCase_ , temb=UpperCAmelCase_ ) down_block_res_samples += res_samples # 3. mid if self.mid_block: __UpperCAmelCase : List[str] = self.mid_block(UpperCAmelCase_ , UpperCAmelCase_ ) # 4. up for i, upsample_block in enumerate(self.up_blocks ): __UpperCAmelCase : Any = down_block_res_samples[-1:] __UpperCAmelCase : List[Any] = down_block_res_samples[:-1] __UpperCAmelCase : str = upsample_block(UpperCAmelCase_ , res_hidden_states_tuple=UpperCAmelCase_ , temb=UpperCAmelCase_ ) # 5. post-process if self.out_block: __UpperCAmelCase : Tuple = self.out_block(UpperCAmelCase_ , UpperCAmelCase_ ) if not return_dict: return (sample,) return UNetaDOutput(sample=UpperCAmelCase_ )
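# --- Illustrative sketch (illustrative sizes) ---
# How the forward pass above turns a per-batch timestep embedding into a
# conditioning signal matching the 1D sample: append a length axis, repeat it
# across the sequence, then broadcast to the sample's batch dimension.
import torch

sample = torch.randn(2, 14, 32)               # (batch, channels, length)
timestep_embed = torch.randn(2, 8)            # (batch, embed_dim) from the time projection
timestep_embed = timestep_embed[..., None]    # (batch, embed_dim, 1)
timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]])               # (batch, embed_dim, length)
timestep_embed = timestep_embed.broadcast_to(sample.shape[:1] + timestep_embed.shape[1:])
print(timestep_embed.shape)  # torch.Size([2, 8, 32])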
'''simple docstring''' import inspect import unittest from transformers import RegNetConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import RegNetForImageClassification, RegNetModel from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowercase__ : def __init__( self : Optional[Any] ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Any=3 ,lowerCamelCase__ : Any=32 ,lowerCamelCase__ : str=3 ,lowerCamelCase__ : Dict=10 ,lowerCamelCase__ : Any=[10, 20, 30, 40] ,lowerCamelCase__ : Union[str, Any]=[1, 1, 2, 1] ,lowerCamelCase__ : int=True ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : Tuple="relu" ,lowerCamelCase__ : List[Any]=3 ,lowerCamelCase__ : Union[str, Any]=None ,): '''simple docstring''' _UpperCamelCase : Optional[int] = parent _UpperCamelCase : Tuple = batch_size _UpperCamelCase : str = image_size _UpperCamelCase : Tuple = num_channels _UpperCamelCase : List[str] = embeddings_size _UpperCamelCase : Any = hidden_sizes _UpperCamelCase : Dict = depths _UpperCamelCase : Any = is_training _UpperCamelCase : List[str] = use_labels _UpperCamelCase : Union[str, Any] = hidden_act _UpperCamelCase : List[Any] = num_labels _UpperCamelCase : Tuple = scope _UpperCamelCase : Union[str, Any] = len(lowerCamelCase__ ) def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' _UpperCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCamelCase : List[Any] = None if self.use_labels: _UpperCamelCase : List[str] = ids_tensor([self.batch_size] ,self.num_labels ) _UpperCamelCase : str = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self : Dict ): '''simple docstring''' return RegNetConfig( num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,) def UpperCamelCase_ ( self : str ,lowerCamelCase__ : Any ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Dict ): '''simple docstring''' _UpperCamelCase : Union[str, Any] = RegNetModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() _UpperCamelCase : List[Any] = model(lowerCamelCase__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,) def UpperCamelCase_ ( self : Dict ,lowerCamelCase__ : Any ,lowerCamelCase__ : int ,lowerCamelCase__ : Optional[Any] ): '''simple docstring''' _UpperCamelCase : Optional[int] = self.num_labels _UpperCamelCase : Dict = RegNetForImageClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() _UpperCamelCase : List[Any] = model(lowerCamelCase__ ,labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self : Optional[int] ): '''simple docstring''' _UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs() 
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Any = config_and_inputs _UpperCamelCase : str = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowercase__ ( lowercase , lowercase , unittest.TestCase ): lowercase__ = (RegNetModel, RegNetForImageClassification) if is_torch_available() else () lowercase__ = ( {"""feature-extraction""": RegNetModel, """image-classification""": RegNetForImageClassification} if is_torch_available() else {} ) lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False def UpperCamelCase_ ( self : int ): '''simple docstring''' _UpperCamelCase : Union[str, Any] = RegNetModelTester(self ) _UpperCamelCase : int = ConfigTester(self ,config_class=lowerCamelCase__ ,has_text_modality=lowerCamelCase__ ) def UpperCamelCase_ ( self : Dict ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' return @unittest.skip(reason='RegNet does not use inputs_embeds' ) def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' pass @unittest.skip(reason='RegNet does not support input and output embeddings' ) def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' pass def UpperCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' _UpperCamelCase , _UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase : Optional[Any] = model_class(lowerCamelCase__ ) _UpperCamelCase : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCamelCase : Any = [*signature.parameters.keys()] _UpperCamelCase : str = ['pixel_values'] self.assertListEqual(arg_names[:1] ,lowerCamelCase__ ) def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' _UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) def UpperCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' _UpperCamelCase , _UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase : str = model_class(config=lowerCamelCase__ ) for name, module in model.named_modules(): if isinstance(lowerCamelCase__ ,(nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) ,msg=F'Parameter {name} of model {model_class} seems not properly initialized' ,) self.assertTrue( torch.all(module.bias == 0 ) ,msg=F'Parameter {name} of model {model_class} seems not properly initialized' ,) def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' def check_hidden_states_output(lowerCamelCase__ : Dict ,lowerCamelCase__ : Dict ,lowerCamelCase__ : List[str] ): _UpperCamelCase : Tuple = model_class(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() with torch.no_grad(): _UpperCamelCase : str = model(**self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ) ) _UpperCamelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _UpperCamelCase : 
int = self.model_tester.num_stages self.assertEqual(len(lowerCamelCase__ ) ,expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 2, self.model_tester.image_size // 2] ,) _UpperCamelCase , _UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _UpperCamelCase : List[str] = ['basic', 'bottleneck'] for model_class in self.all_model_classes: for layer_type in layers_type: _UpperCamelCase : List[str] = layer_type _UpperCamelCase : Tuple = True check_hidden_states_output(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCamelCase : Tuple = True check_hidden_states_output(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ) def UpperCamelCase_ ( self : Optional[int] ): '''simple docstring''' _UpperCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ ) @slow def UpperCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase : Tuple = RegNetModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def A__ ( ): _UpperCamelCase : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class lowercase__ ( unittest.TestCase ): @cached_property def UpperCamelCase_ ( self : str ): '''simple docstring''' return ( AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def UpperCamelCase_ ( self : Optional[int] ): '''simple docstring''' _UpperCamelCase : List[str] = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCamelCase__ ) _UpperCamelCase : Any = self.default_image_processor _UpperCamelCase : int = prepare_img() _UpperCamelCase : Optional[int] = image_processor(images=lowerCamelCase__ ,return_tensors='pt' ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): _UpperCamelCase : int = model(**lowerCamelCase__ ) # verify the logits _UpperCamelCase : Dict = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape ,lowerCamelCase__ ) _UpperCamelCase : int = torch.tensor([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase__ ,atol=1E-4 ) )
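# --- Illustrative sketch ---
# The weight-initialization check above in miniature: a freshly constructed
# normalization layer should have an all-ones weight and an all-zeros bias,
# which is exactly what the test asserts for every BatchNorm/GroupNorm module.
import torch
from torch import nn

bn = nn.BatchNorm2d(8)
assert torch.all(bn.weight == 1) and torch.all(bn.bias == 0)
print("BatchNorm initialized as expected")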
import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal a__ = datasets.utils.logging.get_logger(__name__) a__ = ["""names""", """prefix"""] a__ = ["""warn_bad_lines""", """error_bad_lines""", """mangle_dupe_cols"""] a__ = ["""encoding_errors""", """on_bad_lines"""] a__ = ["""date_format"""] @dataclass class snake_case ( datasets.BuilderConfig ): '''simple docstring''' snake_case_ : str = "," snake_case_ : Optional[str] = None snake_case_ : Optional[Union[int, List[int], str]] = "infer" snake_case_ : Optional[List[str]] = None snake_case_ : Optional[List[str]] = None snake_case_ : Optional[Union[int, str, List[int], List[str]]] = None snake_case_ : Optional[Union[List[int], List[str]]] = None snake_case_ : Optional[str] = None snake_case_ : bool = True snake_case_ : Optional[Literal["c", "python", "pyarrow"]] = None snake_case_ : Dict[Union[int, str], Callable[[Any], Any]] = None snake_case_ : Optional[list] = None snake_case_ : Optional[list] = None snake_case_ : bool = False snake_case_ : Optional[Union[int, List[int]]] = None snake_case_ : Optional[int] = None snake_case_ : Optional[Union[str, List[str]]] = None snake_case_ : bool = True snake_case_ : bool = True snake_case_ : bool = False snake_case_ : bool = True snake_case_ : Optional[str] = None snake_case_ : str = "." snake_case_ : Optional[str] = None snake_case_ : str = '"' snake_case_ : int = 0 snake_case_ : Optional[str] = None snake_case_ : Optional[str] = None snake_case_ : Optional[str] = None snake_case_ : Optional[str] = None snake_case_ : bool = True snake_case_ : bool = True snake_case_ : int = 0 snake_case_ : bool = True snake_case_ : bool = False snake_case_ : Optional[str] = None snake_case_ : int = 1_00_00 snake_case_ : Optional[datasets.Features] = None snake_case_ : Optional[str] = "strict" snake_case_ : Literal["error", "warn", "skip"] = "error" snake_case_ : Optional[str] = None def UpperCamelCase_ ( self : List[Any]) -> Dict: """simple docstring""" if self.delimiter is not None: _snake_case : str = self.delimiter if self.column_names is not None: _snake_case : str = self.column_names @property def UpperCamelCase_ ( self : List[Any]) -> str: """simple docstring""" _snake_case : Dict = { """sep""": self.sep, """header""": self.header, """names""": self.names, """index_col""": self.index_col, """usecols""": self.usecols, """prefix""": self.prefix, """mangle_dupe_cols""": self.mangle_dupe_cols, """engine""": self.engine, """converters""": self.converters, """true_values""": self.true_values, """false_values""": self.false_values, """skipinitialspace""": self.skipinitialspace, """skiprows""": self.skiprows, """nrows""": self.nrows, """na_values""": self.na_values, """keep_default_na""": self.keep_default_na, """na_filter""": self.na_filter, """verbose""": self.verbose, """skip_blank_lines""": self.skip_blank_lines, """thousands""": self.thousands, """decimal""": self.decimal, """lineterminator""": self.lineterminator, """quotechar""": self.quotechar, """quoting""": self.quoting, """escapechar""": self.escapechar, """comment""": self.comment, """encoding""": self.encoding, """dialect""": self.dialect, """error_bad_lines""": self.error_bad_lines, """warn_bad_lines""": self.warn_bad_lines, """skipfooter""": self.skipfooter, """doublequote""": 
self.doublequote, """memory_map""": self.memory_map, """float_precision""": self.float_precision, """chunksize""": self.chunksize, """encoding_errors""": self.encoding_errors, """on_bad_lines""": self.on_bad_lines, """date_format""": self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , lowerCAmelCase): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class snake_case ( datasets.ArrowBasedBuilder ): '''simple docstring''' snake_case_ : Union[str, Any] = CsvConfig def UpperCamelCase_ ( self : str) -> List[str]: """simple docstring""" return datasets.DatasetInfo(features=self.config.features) def UpperCamelCase_ ( self : Optional[Any] , lowerCAmelCase : Union[str, Any]) -> List[Any]: """simple docstring""" if not self.config.data_files: raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''') _snake_case : Union[str, Any] = dl_manager.download_and_extract(self.config.data_files) if isinstance(lowerCAmelCase , (str, list, tuple)): _snake_case : int = data_files if isinstance(lowerCAmelCase , lowerCAmelCase): _snake_case : int = [files] _snake_case : int = [dl_manager.iter_files(lowerCAmelCase) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files})] _snake_case : Union[str, Any] = [] for split_name, files in data_files.items(): if isinstance(lowerCAmelCase , lowerCAmelCase): _snake_case : List[str] = [files] _snake_case : Any = [dl_manager.iter_files(lowerCAmelCase) for file in files] splits.append(datasets.SplitGenerator(name=lowerCAmelCase , gen_kwargs={"""files""": files})) return splits def UpperCamelCase_ ( self : Optional[Any] , lowerCAmelCase : pa.Table) -> pa.Table: """simple docstring""" if self.config.features is not None: _snake_case : List[str] = self.config.features.arrow_schema if all(not require_storage_cast(lowerCAmelCase) for feature in self.config.features.values()): # cheaper cast _snake_case : Optional[Any] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCAmelCase) else: # more expensive cast; allows str <-> int/float or str to Audio for example _snake_case : Dict = table_cast(lowerCAmelCase , lowerCAmelCase) return pa_table def UpperCamelCase_ ( self : str , lowerCAmelCase : str) -> Dict: """simple docstring""" _snake_case : Union[str, Any] = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str _snake_case : Optional[Any] = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCAmelCase) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values()) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase)): _snake_case : 
str = pd.read_csv(lowerCAmelCase , iterator=lowerCAmelCase , dtype=lowerCAmelCase , **self.config.pd_read_csv_kwargs) try: for batch_idx, df in enumerate(lowerCAmelCase): _snake_case : List[Any] = pa.Table.from_pandas(lowerCAmelCase) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase) except ValueError as e: logger.error(F'''Failed to read file \'{file}\' with error {type(lowerCAmelCase)}: {e}''') raise
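# --- Illustrative sketch (hypothetical data) ---
# The chunked read performed by the final method above: pandas reads the CSV
# in fixed-size chunks, and each chunk is converted to a pyarrow Table before
# being yielded (and, when features are configured, cast to their schema).
import io
import pandas as pd
import pyarrow as pa

csv_data = io.StringIO("a,b\n1,x\n2,y\n3,z\n")
for batch_idx, df in enumerate(pd.read_csv(csv_data, iterator=True, chunksize=2)):
    pa_table = pa.Table.from_pandas(df)
    print(batch_idx, pa_table.num_rows)
# -> 0 2
# -> 1 1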
"""simple docstring""" import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def UpperCamelCase (lowercase_: Union[str, Any] ) -> Dict: A__ : int = [] embed.append( ( f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""", f"""stage{idx}.patch_embed.proj.weight""", ) ) embed.append( ( f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""", f"""stage{idx}.patch_embed.proj.bias""", ) ) embed.append( ( f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""", f"""stage{idx}.patch_embed.norm.weight""", ) ) embed.append( ( f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""", f"""stage{idx}.patch_embed.norm.bias""", ) ) return embed def UpperCamelCase (lowercase_: Union[str, Any] , lowercase_: Union[str, Any] ) -> List[Any]: A__ : Optional[int] = [] attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""", 
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""", f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""", f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""", f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""", f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""", f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""", f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.attn.proj.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.attn.proj.bias""", ) ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") ) attention_weights.append( 
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") ) return attention_weights def UpperCamelCase (lowercase_: Any ) -> Optional[Any]: A__ : int = [] token.append((f"""cvt.encoder.stages.{idx}.cls_token""", """stage2.cls_token""") ) return token def UpperCamelCase () -> Any: A__ : List[Any] = [] head.append(("""layernorm.weight""", """norm.weight""") ) head.append(("""layernorm.bias""", """norm.bias""") ) head.append(("""classifier.weight""", """head.weight""") ) head.append(("""classifier.bias""", """head.bias""") ) return head def UpperCamelCase (lowercase_: str , lowercase_: List[Any] , lowercase_: List[str] , lowercase_: str ) -> Optional[int]: A__ : Dict = """imagenet-1k-id2label.json""" A__ : List[Any] = 1000 A__ : Optional[int] = """huggingface/label-files""" A__ : Dict = num_labels A__ : Optional[int] = json.load(open(cached_download(hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" ) ) , """r""" ) ) A__ : Dict = {int(__lowerCAmelCase ): v for k, v in idalabel.items()} A__ : Any = idalabel A__ : Tuple = {v: k for k, v in idalabel.items()} A__ : Any = CvtConfig(num_labels=__lowerCAmelCase , idalabel=__lowerCAmelCase , labelaid=__lowerCAmelCase ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "13": A__ : int = [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "21": A__ : int = [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: A__ : Any = [2, 2, 20] A__ : Any = [3, 12, 16] A__ : Optional[int] = [192, 768, 1024] A__ : Tuple = CvtForImageClassification(__lowerCAmelCase ) A__ : str = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" ) A__ : str = image_size A__ : str = torch.load(__lowerCAmelCase , map_location=torch.device("""cpu""" ) ) A__ : Optional[int] = OrderedDict() A__ : str = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: A__ : Union[str, Any] = list_of_state_dict + cls_token(__lowerCAmelCase ) A__ : Any = list_of_state_dict + embeddings(__lowerCAmelCase ) for cnt in range(config.depth[idx] ): A__ : Optional[int] = list_of_state_dict + attention(__lowerCAmelCase , __lowerCAmelCase ) A__ : Optional[int] = list_of_state_dict + final() for gg in list_of_state_dict: print(__lowerCAmelCase ) for i in range(len(__lowerCAmelCase ) ): A__ : List[Any] = original_weights[list_of_state_dict[i][1]] model.load_state_dict(__lowerCAmelCase ) model.save_pretrained(__lowerCAmelCase ) image_processor.save_pretrained(__lowerCAmelCase ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": A_ : Tuple = argparse.ArgumentParser() parser.add_argument( '--cvt_model', default='cvt-w24', type=str, help='Name of the cvt model you\'d like to convert.', ) parser.add_argument( '--image_size', default=384, type=int, help='Input Image Size', ) 
parser.add_argument( '--cvt_file_name', default=r'cvtmodels\CvT-w24-384x384-IN-22k.pth', type=str, help='Input Image Size', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) A_ : List[Any] = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
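# Quick illustration of the model-name parsing used above to pick the stage
# depths: characters [4:6] of the checkpoint name select the variant, and
# anything that is not "13" or "21" falls through to the wide w24 branch.
for name in ("cvt-13", "cvt-21", "cvt-w24"):
    print(name, "->", name.rsplit("/", 1)[-1][4:6])  # 13, 21, w2 (w2 -> else branch)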
353
from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
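# A minimal sketch of calling the search above on the forward/backward adjacency
# tables defined in this file: with those tables the cheapest E -> F route is
# E -> G -> F with total cost 3.
assert bidirectional_dij("E", "F", graph_fwd, graph_bwd) == 3
assert bidirectional_dij("E", "E", graph_fwd, graph_bwd) == 0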
141
0
"""simple docstring""" import numpy as np import torch from torch.utils.data import Dataset from utils import logger class lowerCAmelCase_ (a__ ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = params SCREAMING_SNAKE_CASE__ : List[str] = np.array(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : List[str] = np.array([len(SCREAMING_SNAKE_CASE__ ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__(self , SCREAMING_SNAKE_CASE__ ) -> Dict: """simple docstring""" return (self.token_ids[index], self.lengths[index]) def __len__(self ) -> int: """simple docstring""" return len(self.lengths ) def __magic_name__ (self ) -> str: """simple docstring""" assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def __magic_name__ (self ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self.params.max_model_input_size SCREAMING_SNAKE_CASE__ : Dict = self.lengths > max_len logger.info(F'''Splitting {sum(SCREAMING_SNAKE_CASE__ )} too long sequences.''' ) def divide_chunks(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): return [l[i : i + n] for i in range(0 , len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )] SCREAMING_SNAKE_CASE__ : Tuple = [] SCREAMING_SNAKE_CASE__ : Any = [] if self.params.mlm: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""] else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: SCREAMING_SNAKE_CASE__ : Union[str, Any] = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: SCREAMING_SNAKE_CASE__ : int = np.insert(SCREAMING_SNAKE_CASE__ , 0 , SCREAMING_SNAKE_CASE__ ) if sub_s[-1] != sep_id: SCREAMING_SNAKE_CASE__ : str = np.insert(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) assert len(SCREAMING_SNAKE_CASE__ ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(SCREAMING_SNAKE_CASE__ ) new_tok_ids.extend(SCREAMING_SNAKE_CASE__ ) new_lengths.extend([len(SCREAMING_SNAKE_CASE__ ) for l in sub_seqs] ) SCREAMING_SNAKE_CASE__ : Dict = np.array(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = np.array(SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = len(self ) SCREAMING_SNAKE_CASE__ : List[Any] = self.lengths > 11 SCREAMING_SNAKE_CASE__ : Dict = self.token_ids[indices] SCREAMING_SNAKE_CASE__ : Optional[int] = self.lengths[indices] SCREAMING_SNAKE_CASE__ : str = len(self ) logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' ) def __magic_name__ (self ) -> Optional[Any]: """simple docstring""" if "unk_token" not in self.params.special_tok_ids: return else: SCREAMING_SNAKE_CASE__ : Optional[int] = self.params.special_tok_ids["""unk_token"""] SCREAMING_SNAKE_CASE__ : Optional[Any] = len(self ) SCREAMING_SNAKE_CASE__ : str = np.array([np.count_nonzero(a == unk_token_id ) for a 
in self.token_ids] ) SCREAMING_SNAKE_CASE__ : List[str] = (unk_occs / self.lengths) < 0.5 SCREAMING_SNAKE_CASE__ : List[str] = self.token_ids[indices] SCREAMING_SNAKE_CASE__ : List[Any] = self.lengths[indices] SCREAMING_SNAKE_CASE__ : List[str] = len(self ) logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' ) def __magic_name__ (self ) -> Optional[Any]: """simple docstring""" if not self.params.is_master: return logger.info(F'''{len(self )} sequences''' ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = [t[0] for t in batch] SCREAMING_SNAKE_CASE__ : Tuple = [t[1] for t in batch] assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) # Max for paddings SCREAMING_SNAKE_CASE__ : int = max(SCREAMING_SNAKE_CASE__ ) # Pad token ids if self.params.mlm: SCREAMING_SNAKE_CASE__ : Any = self.params.special_tok_ids["""pad_token"""] else: SCREAMING_SNAKE_CASE__ : List[str] = self.params.special_tok_ids["""unk_token"""] SCREAMING_SNAKE_CASE__ : Union[str, Any] = [list(t.astype(SCREAMING_SNAKE_CASE__ ) ) + [pad_idx] * (max_seq_len_ - len(SCREAMING_SNAKE_CASE__ )) for t in token_ids] assert len(tk_ ) == len(SCREAMING_SNAKE_CASE__ ) assert all(len(SCREAMING_SNAKE_CASE__ ) == max_seq_len_ for t in tk_ ) SCREAMING_SNAKE_CASE__ : Any = torch.tensor(tk_ ) # (bs, max_seq_len_) SCREAMING_SNAKE_CASE__ : Any = torch.tensor(SCREAMING_SNAKE_CASE__ ) # (bs) return tk_t, lg_t
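# A small sketch of the chunking helper defined inside the long-sequence
# splitting method above: an over-long sequence is cut into n-sized pieces
# (n = max_len - 2, leaving room to re-attach the bos/cls and eos/sep ids).
def _divide_chunks_demo(seq, n):
    return [seq[i : i + n] for i in range(0, len(seq), n)]


print(_divide_chunks_demo(list(range(10)), 4))  # [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]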
25
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class snake_case ( __UpperCAmelCase , unittest.TestCase ): """simple docstring""" snake_case__ = ShapEImgaImgPipeline snake_case__ = ["image"] snake_case__ = ["image"] snake_case__ = [ "num_images_per_prompt", "num_inference_steps", "generator", "latents", "guidance_scale", "frame_size", "output_type", "return_dict", ] snake_case__ = False @property def __lowerCAmelCase ( self : List[str] ): return 32 @property def __lowerCAmelCase ( self : str ): return 32 @property def __lowerCAmelCase ( self : int ): return self.time_input_dim * 4 @property def __lowerCAmelCase ( self : List[Any] ): return 8 @property def __lowerCAmelCase ( self : Optional[int] ): torch.manual_seed(0 ) UpperCAmelCase__ = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size ,image_size=64 ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_channels=3 ,num_hidden_layers=5 ,patch_size=1 ,) UpperCAmelCase__ = CLIPVisionModel(lowerCamelCase__ ) return model @property def __lowerCAmelCase ( self : Optional[Any] ): UpperCAmelCase__ = CLIPImageProcessor( crop_size=224 ,do_center_crop=lowerCamelCase__ ,do_normalize=lowerCamelCase__ ,do_resize=lowerCamelCase__ ,image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] ,image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] ,resample=3 ,size=224 ,) return image_processor @property def __lowerCAmelCase ( self : str ): torch.manual_seed(0 ) UpperCAmelCase__ = { 'num_attention_heads': 2, 'attention_head_dim': 16, 'embedding_dim': self.time_input_dim, 'num_embeddings': 32, 'embedding_proj_dim': self.text_embedder_hidden_size, 'time_embed_dim': self.time_embed_dim, 'num_layers': 1, 'clip_embed_dim': self.time_input_dim * 2, 'additional_embeddings': 0, 'time_embed_act_fn': 'gelu', 'norm_in_type': 'layer', 'embedding_proj_norm_type': 'layer', 'encoder_hid_proj_type': None, 'added_emb_type': None, } UpperCAmelCase__ = PriorTransformer(**lowerCamelCase__ ) return model @property def __lowerCAmelCase ( self : Tuple ): torch.manual_seed(0 ) UpperCAmelCase__ = { 'param_shapes': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), 'd_latent': self.time_input_dim, 'd_hidden': self.renderer_dim, 'n_output': 12, 'background': ( 0.1, 0.1, 0.1, ), } UpperCAmelCase__ = ShapERenderer(**lowerCamelCase__ ) return model def __lowerCAmelCase ( self : Any ): UpperCAmelCase__ = self.dummy_prior UpperCAmelCase__ = self.dummy_image_encoder UpperCAmelCase__ = self.dummy_image_processor UpperCAmelCase__ = self.dummy_renderer UpperCAmelCase__ = HeunDiscreteScheduler( beta_schedule='exp' ,num_train_timesteps=1_024 ,prediction_type='sample' ,use_karras_sigmas=lowerCamelCase__ ,clip_sample=lowerCamelCase__ ,clip_sample_range=1.0 ,) UpperCAmelCase__ = { 'prior': prior, 'image_encoder': image_encoder, 'image_processor': image_processor, 'renderer': renderer, 'scheduler': scheduler, } return components def __lowerCAmelCase ( self : Optional[int] ,lowerCamelCase__ 
: Any ,lowerCamelCase__ : str=0 ): UpperCAmelCase__ = floats_tensor((1, 3, 64, 64) ,rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ ) if str(lowerCamelCase__ ).startswith('mps' ): UpperCAmelCase__ = torch.manual_seed(lowerCamelCase__ ) else: UpperCAmelCase__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ ) UpperCAmelCase__ = { 'image': input_image, 'generator': generator, 'num_inference_steps': 1, 'frame_size': 32, 'output_type': 'np', } return inputs def __lowerCAmelCase ( self : Optional[int] ): UpperCAmelCase__ = 'cpu' UpperCAmelCase__ = self.get_dummy_components() UpperCAmelCase__ = self.pipeline_class(**lowerCamelCase__ ) UpperCAmelCase__ = pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) UpperCAmelCase__ = pipe(**self.get_dummy_inputs(lowerCamelCase__ ) ) UpperCAmelCase__ = output.images[0] UpperCAmelCase__ = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) UpperCAmelCase__ = np.array( [ 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __lowerCAmelCase ( self : Tuple ): # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __lowerCAmelCase ( self : Tuple ): UpperCAmelCase__ = torch_device == 'cpu' UpperCAmelCase__ = True self._test_inference_batch_single_identical( batch_size=2 ,test_max_difference=lowerCamelCase__ ,relax_max_difference=lowerCamelCase__ ,) def __lowerCAmelCase ( self : List[Any] ): UpperCAmelCase__ = self.get_dummy_components() UpperCAmelCase__ = self.pipeline_class(**lowerCamelCase__ ) UpperCAmelCase__ = pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) UpperCAmelCase__ = 1 UpperCAmelCase__ = 2 UpperCAmelCase__ = self.get_dummy_inputs(lowerCamelCase__ ) for key in inputs.keys(): if key in self.batch_params: UpperCAmelCase__ = batch_size * [inputs[key]] UpperCAmelCase__ = pipe(**lowerCamelCase__ ,num_images_per_prompt=lowerCamelCase__ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class snake_case ( unittest.TestCase ): """simple docstring""" def __lowerCAmelCase ( self : int ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self : Any ): UpperCAmelCase__ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' ) UpperCAmelCase__ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/test_shap_e_img2img_out.npy' ) UpperCAmelCase__ = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' ) UpperCAmelCase__ = pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) UpperCAmelCase__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 ) UpperCAmelCase__ = pipe( lowerCamelCase__ ,generator=lowerCamelCase__ ,guidance_scale=3.0 ,num_inference_steps=64 ,frame_size=64 ,output_type='np' ,).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(lowerCamelCase__ ,lowerCamelCase__ )
98
0
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Return the element-wise sigmoid of the input."""
    return 1 / (1 + np.exp(-vector))


def swish(vector: np.ndarray) -> np.ndarray:
    """Return the element-wise swish (sigmoid-weighted linear unit) of the input."""
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
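# Quick numerical check of the two activations above: sigmoid squashes into
# (0, 1) and swish(x) = x * sigmoid(x) approaches the identity for large x.
if __name__ == "__main__":
    v = np.array([-2.0, 0.0, 2.0])
    print(sigmoid(v))  # ~[0.119 0.5   0.881]
    print(swish(v))    # ~[-0.238  0.    1.762]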
367
"""simple docstring""" import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __A ( _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, unittest.TestCase ): """simple docstring""" __lowerCAmelCase = IFInpaintingSuperResolutionPipeline __lowerCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"} __lowerCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} ) __lowerCAmelCase = PipelineTesterMixin.required_optional_params - {"latents"} def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: return self._get_superresolution_dummy_components() def SCREAMING_SNAKE_CASE ( self , __A , __A=0 ) -> Optional[int]: if str(__A ).startswith('''mps''' ): a =torch.manual_seed(__A ) else: a =torch.Generator(device=__A ).manual_seed(__A ) a =floats_tensor((1, 3, 16, 16) , rng=random.Random(__A ) ).to(__A ) a =floats_tensor((1, 3, 32, 32) , rng=random.Random(__A ) ).to(__A ) a =floats_tensor((1, 3, 32, 32) , rng=random.Random(__A ) ).to(__A ) a ={ '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''original_image''': original_image, '''mask_image''': mask_image, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def SCREAMING_SNAKE_CASE ( self ) -> List[Any]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def SCREAMING_SNAKE_CASE ( self ) -> Dict: self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def SCREAMING_SNAKE_CASE ( self ) -> int: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def SCREAMING_SNAKE_CASE ( self ) -> Any: self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def SCREAMING_SNAKE_CASE ( self ) -> List[Any]: self._test_save_load_local() def SCREAMING_SNAKE_CASE ( self ) -> Dict: self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
215
0
from __future__ import annotations

from collections.abc import Callable
from typing import Any, Generic, TypeVar

T = TypeVar("T")


class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None

        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """Test min, max and sum queries over every contiguous sub-range."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
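# A short usage sketch for the class above: leaves live at indices N..2N-1 of
# self.st, a point update refreshes O(log N) parents, and a range query walks
# the two boundaries inward.
demo = SegmentTree([5, 2, 8, 1], min)
assert demo.query(0, 2) == 2
demo.update(1, 9)
assert demo.query(0, 2) == 5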
107
import random import unittest import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax if is_flax_available(): import os import jax.numpy as jnp from jax import jit from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model __lowerCAmelCase : List[Any] = '0.12' # assumed parallelism: 8 if is_torch_available(): import torch def __magic_name__ ( A : Dict, A : Union[str, Any], A : Optional[int]=None ): '''simple docstring''' if rng is None: a = random.Random() a = 1 for dim in shape: total_dims *= dim a = [] for _ in range(A ): values.append(rng.randint(0, vocab_size - 1 ) ) a = np.array(A, dtype=jnp.intaa ).reshape(A ) return output def __magic_name__ ( A : Dict, A : Union[str, Any]=None ): '''simple docstring''' a = ids_tensor(A, vocab_size=2, rng=A ) # make sure that at least one token is attended to for each batch a = 1 return attn_mask @require_flax class snake_case__ : """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = None SCREAMING_SNAKE_CASE_ : Any = () def __UpperCAmelCase ( self : int ) -> List[str]: a , a = self.model_tester.prepare_config_and_inputs_for_common() # cut to half length & take max batch_size 3 a = 2 a = inputs["input_ids"].shape[-1] // 2 a = inputs["input_ids"][:max_batch_size, :sequence_length] a = jnp.ones_like(__lowerCamelCase ) a = attention_mask[:max_batch_size, :sequence_length] # generate max 5 tokens a = input_ids.shape[-1] + 5 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` a = config.eos_token_id return config, input_ids, attention_mask, max_length @is_pt_flax_cross_test def __UpperCAmelCase ( self : Optional[Any] ) -> int: a , a , a , a = self._get_input_ids_and_config() a = False a = max_length a = 0 for model_class in self.all_generative_model_classes: a = model_class(__lowerCamelCase ) a = model_class.__name__[4:] # Skip the "Flax" at the beginning a = getattr(__lowerCamelCase , __lowerCamelCase ) a = pt_model_class(__lowerCamelCase ).eval() a = load_flax_weights_in_pytorch_model(__lowerCamelCase , flax_model.params ) a = flax_model.generate(__lowerCamelCase ).sequences a = pt_model.generate(torch.tensor(__lowerCamelCase , dtype=torch.long ) ) if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]: a = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]] self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() ) def __UpperCAmelCase ( self : List[str] ) -> Optional[int]: a , a , a , a = self._get_input_ids_and_config() a = False a = max_length for model_class in self.all_generative_model_classes: a = model_class(__lowerCamelCase ) a = model.generate(__lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase ) a = jit(model.generate ) a = jit_generate(__lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def __UpperCAmelCase ( self : Optional[int] ) -> Any: a , a , a , a = self._get_input_ids_and_config() a = True a = max_length for model_class in self.all_generative_model_classes: a = model_class(__lowerCamelCase ) a = model.generate(__lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase ) a = jit(model.generate ) a = jit_generate(__lowerCamelCase 
).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def __UpperCAmelCase ( self : int ) -> Dict: a , a , a , a = self._get_input_ids_and_config() a = False a = max_length a = 2 for model_class in self.all_generative_model_classes: a = model_class(__lowerCamelCase ) a = model.generate(__lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase ) a = jit(model.generate ) a = jit_generate(__lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def __UpperCAmelCase ( self : Any ) -> Union[str, Any]: a , a , a , a = self._get_input_ids_and_config() a = False a = max_length a = 2 a = 2 for model_class in self.all_generative_model_classes: a = model_class(__lowerCamelCase ) a = model.generate(__lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences ) def __UpperCAmelCase ( self : Optional[Any] ) -> Dict: a , a , a , a = self._get_input_ids_and_config() a = True a = max_length a = 0.8 a = 10 a = 0.3 a = 1 a = 8 a = 9 for model_class in self.all_generative_model_classes: a = model_class(__lowerCamelCase ) a = model.generate(__lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase ) a = jit(model.generate ) a = jit_generate(__lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]: a , a , a , a = self._get_input_ids_and_config() a = max_length a = 1 a = 8 a = 9 for model_class in self.all_generative_model_classes: a = model_class(__lowerCamelCase ) a = model.generate(__lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase ) a = jit(model.generate ) a = jit_generate(__lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]: a , a , a , a = self._get_input_ids_and_config() a = max_length a = 2 a = 1 a = 8 a = 9 for model_class in self.all_generative_model_classes: a = model_class(__lowerCamelCase ) a = model.generate(__lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase ) a = jit(model.generate ) a = jit_generate(__lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict: a , a , a , a = self._get_input_ids_and_config() # pad attention mask on the left a = attention_mask.at[(0, 0)].set(0 ) a = False a = max_length for model_class in self.all_generative_model_classes: a = model_class(__lowerCamelCase ) a = model.generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase ) a = jit(model.generate ) a = jit_generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def __UpperCAmelCase ( self : Tuple ) -> Tuple: a , a , a , a = self._get_input_ids_and_config() # pad attention mask on the left a = attention_mask.at[(0, 0)].set(0 ) a = True a = max_length for model_class in self.all_generative_model_classes: a = model_class(__lowerCamelCase ) a = model.generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , 
__lowerCamelCase ) a = jit(model.generate ) a = jit_generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]: a , a , a , a = self._get_input_ids_and_config() # pad attention mask on the left a = attention_mask.at[(0, 0)].set(0 ) a = 2 a = max_length for model_class in self.all_generative_model_classes: a = model_class(__lowerCamelCase ) a = model.generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase ) a = jit(model.generate ) a = jit_generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) @require_flax class snake_case__ (unittest.TestCase ): """simple docstring""" def __UpperCAmelCase ( self : Dict ) -> Optional[Any]: a = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" ) a = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" ) a = "Hello world" a = tokenizer(__lowerCamelCase , return_tensors="np" ).input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(__lowerCamelCase , "do_samples" ): model.generate(__lowerCamelCase , do_samples=__lowerCamelCase ) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(__lowerCamelCase , "foo" ): a = {"foo": "bar"} model.generate(__lowerCamelCase , **__lowerCamelCase )
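# The tests above repeatedly check that a jit-compiled generate call matches the
# eager call. A self-contained sketch of that pattern with a toy stand-in for
# model.generate (the stand-in is an illustration, not a real model):
import jax
import jax.numpy as jnp


def _toy_generate(ids: jnp.ndarray) -> jnp.ndarray:
    # append one "token": the current max id plus one (fixed output shape, so jit-safe)
    return jnp.concatenate([ids, ids.max(axis=-1, keepdims=True) + 1], axis=-1)


eager_out = _toy_generate(jnp.array([[1, 2, 3]]))
jit_out = jax.jit(_toy_generate)(jnp.array([[1, 2, 3]]))
assert eager_out.tolist() == jit_out.tolist()  # [[1, 2, 3, 4]]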
107
1
'''simple docstring''' import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, ) from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _a : '''simple docstring''' def __init__( self, A, A=2, A=3, A=4, A=2, A=7, A=True, A=True, A=True, A=True, A=99, A=36, A=3, A=4, A=37, A="gelu", A=0.1, A=0.1, A=512, A=16, A=2, A=0.02, A=6, A=6, A=3, A=4, A=None, A=1_000, ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = parent SCREAMING_SNAKE_CASE : List[Any] = batch_size SCREAMING_SNAKE_CASE : Optional[int] = num_channels SCREAMING_SNAKE_CASE : Optional[int] = image_size SCREAMING_SNAKE_CASE : List[str] = patch_size SCREAMING_SNAKE_CASE : List[Any] = text_seq_length SCREAMING_SNAKE_CASE : Tuple = is_training SCREAMING_SNAKE_CASE : Tuple = use_input_mask SCREAMING_SNAKE_CASE : Optional[int] = use_token_type_ids SCREAMING_SNAKE_CASE : Optional[Any] = use_labels SCREAMING_SNAKE_CASE : Optional[int] = vocab_size SCREAMING_SNAKE_CASE : Any = hidden_size SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads SCREAMING_SNAKE_CASE : Any = intermediate_size SCREAMING_SNAKE_CASE : List[Any] = hidden_act SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : str = max_position_embeddings SCREAMING_SNAKE_CASE : List[str] = type_vocab_size SCREAMING_SNAKE_CASE : Tuple = type_sequence_label_size SCREAMING_SNAKE_CASE : List[Any] = initializer_range SCREAMING_SNAKE_CASE : str = coordinate_size SCREAMING_SNAKE_CASE : Tuple = shape_size SCREAMING_SNAKE_CASE : Optional[Any] = num_labels SCREAMING_SNAKE_CASE : int = num_choices SCREAMING_SNAKE_CASE : str = scope SCREAMING_SNAKE_CASE : Optional[int] = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) SCREAMING_SNAKE_CASE : Union[str, Any] = text_seq_length SCREAMING_SNAKE_CASE : List[str] = (image_size // patch_size) ** 2 + 1 SCREAMING_SNAKE_CASE : int = self.text_seq_length + self.image_seq_length def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size ) SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: SCREAMING_SNAKE_CASE : str = bbox[i, j, 3] SCREAMING_SNAKE_CASE : Optional[Any] = bbox[i, j, 1] SCREAMING_SNAKE_CASE : Tuple = t if bbox[i, j, 2] < bbox[i, j, 0]: SCREAMING_SNAKE_CASE : 
Optional[Any] = bbox[i, j, 2] SCREAMING_SNAKE_CASE : Any = bbox[i, j, 0] SCREAMING_SNAKE_CASE : Tuple = t SCREAMING_SNAKE_CASE : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE : Any = None if self.use_input_mask: SCREAMING_SNAKE_CASE : List[Any] = random_attention_mask([self.batch_size, self.text_seq_length] ) SCREAMING_SNAKE_CASE : List[Any] = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size ) SCREAMING_SNAKE_CASE : Optional[Any] = None SCREAMING_SNAKE_CASE : Tuple = None if self.use_labels: SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size], self.type_sequence_label_size ) SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels ) SCREAMING_SNAKE_CASE : int = LayoutLMvaConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def UpperCamelCase_ ( self, A, A, A, A, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = LayoutLMvaModel(config=A ) model.to(A ) model.eval() # text + image SCREAMING_SNAKE_CASE : Optional[int] = model(A, pixel_values=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = model( A, bbox=A, pixel_values=A, attention_mask=A, token_type_ids=A ) SCREAMING_SNAKE_CASE : List[str] = model(A, bbox=A, pixel_values=A, token_type_ids=A ) SCREAMING_SNAKE_CASE : Optional[int] = model(A, bbox=A, pixel_values=A ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) # text only SCREAMING_SNAKE_CASE : List[Any] = model(A ) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only SCREAMING_SNAKE_CASE : List[str] = model(pixel_values=A ) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size) ) def UpperCamelCase_ ( self, A, A, A, A, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels SCREAMING_SNAKE_CASE : List[str] = LayoutLMvaForSequenceClassification(A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : Dict = model( A, bbox=A, pixel_values=A, attention_mask=A, token_type_ids=A, labels=A, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self, A, A, A, A, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.num_labels SCREAMING_SNAKE_CASE : str = LayoutLMvaForTokenClassification(config=A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : str = model( A, bbox=A, pixel_values=A, attention_mask=A, token_type_ids=A, labels=A, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels) ) def UpperCamelCase_ ( self, A, A, A, A, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = 
LayoutLMvaForQuestionAnswering(config=A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : Optional[int] = model( A, bbox=A, pixel_values=A, attention_mask=A, token_type_ids=A, start_positions=A, end_positions=A, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ) : Any = config_and_inputs SCREAMING_SNAKE_CASE : Dict = { 'input_ids': input_ids, 'bbox': bbox, 'pixel_values': pixel_values, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_torch class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : Optional[int] = False A : List[str] = False A : Union[str, Any] = False A : Optional[Any] = ( ( LayoutLMvaModel, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaForQuestionAnswering, ) if is_torch_available() else () ) A : List[Any] = ( {'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel} if is_torch_available() else {} ) def UpperCamelCase_ ( self, A, A, A, A, A ): '''simple docstring''' return True def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = LayoutLMvaModelTester(self ) SCREAMING_SNAKE_CASE : Optional[Any] = ConfigTester(self, config_class=A, hidden_size=37 ) def UpperCamelCase_ ( self, A, A, A=False ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = copy.deepcopy(A ) if model_class in get_values(A ): SCREAMING_SNAKE_CASE : Optional[int] = { k: v.unsqueeze(1 ).expand(-1, self.model_tester.num_choices, -1 ).contiguous() if isinstance(A, torch.Tensor ) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(A ): SCREAMING_SNAKE_CASE : Optional[Any] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=A ) elif model_class in get_values(A ): SCREAMING_SNAKE_CASE : Dict = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=A ) SCREAMING_SNAKE_CASE : Dict = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=A ) elif model_class in [ *get_values(A ), ]: SCREAMING_SNAKE_CASE : str = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=A ) elif model_class in [ *get_values(A ), ]: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=A, ) return inputs_dict def UpperCamelCase_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: SCREAMING_SNAKE_CASE : List[str] = type self.model_tester.create_and_check_model(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE : Optional[Any] = LayoutLMvaModel.from_pretrained(A ) self.assertIsNotNone(A ) def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch class _a ( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self ): '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=A ) if is_vision_available() else None @slow def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ).to(A ) SCREAMING_SNAKE_CASE : Any = self.default_image_processor SCREAMING_SNAKE_CASE : Any = prepare_img() SCREAMING_SNAKE_CASE : Dict = image_processor(images=A, return_tensors='pt' ).pixel_values.to(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[1, 2]] ) SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 ) # forward pass SCREAMING_SNAKE_CASE : Union[str, Any] = model( input_ids=input_ids.to(A ), bbox=bbox.to(A ), pixel_values=pixel_values.to(A ), ) # verify the logits SCREAMING_SNAKE_CASE : str = torch.Size((1, 199, 768) ) self.assertEqual(outputs.last_hidden_state.shape, A ) SCREAMING_SNAKE_CASE : Tuple = torch.tensor( [[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] ).to(A ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], A, atol=1E-4 ) )
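# The model tester above sanitizes random boxes so that x0 <= x1 and y0 <= y1.
# The same invariant as a tiny standalone helper (an illustrative sketch, not
# part of the test suite):
import torch


def _sort_box_corners(bbox: torch.Tensor) -> torch.Tensor:
    x0 = torch.minimum(bbox[..., 0], bbox[..., 2])
    y0 = torch.minimum(bbox[..., 1], bbox[..., 3])
    x1 = torch.maximum(bbox[..., 0], bbox[..., 2])
    y1 = torch.maximum(bbox[..., 1], bbox[..., 3])
    return torch.stack([x0, y0, x1, y1], dim=-1)


print(_sort_box_corners(torch.tensor([[9, 7, 3, 2]])))  # tensor([[3, 2, 9, 7]])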
246
"""Repunit divisibility: find the least value of n for which the smallest
repunit divisible by n has more than one million digits."""


def least_divisible_repunit(divisor: int) -> int:
    """Return the smallest k such that the repunit R(k) = 111...1 (k ones) is divisible by divisor."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    """Return the least odd divisor coprime to 10 whose least divisible repunit exceeds limit."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f"{solution() = }")
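# Worked example for least_divisible_repunit above: the first repunit divisible
# by 7 is R(6) = 111111 = 7 * 15873, so the function returns 6.
assert least_divisible_repunit(7) == 6
assert least_divisible_repunit(41) == 5  # R(5) = 11111 = 41 * 271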
246
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
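# A minimal sketch of the lazy-loading idea behind _LazyModule above, using
# module-level __getattr__ (PEP 562). This illustrates the pattern only; it is
# not the actual _LazyModule implementation, and the mapping is hypothetical:
import importlib

_LAZY_ATTRS = {"BlenderbotModel": ".modeling_blenderbot"}


def __getattr__(name):
    # heavy submodules are imported on first attribute access, not at package import
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")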
6
import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : Optional[int] = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } _lowerCAmelCase : int = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int ) -> List[Any]: for attribute in key.split("." ): A_ : List[Any] = getattr(_lowerCAmelCase , _lowerCAmelCase ) if weight_type is not None: A_ : List[Any] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape else: A_ : Tuple = hf_pointer.shape assert hf_shape == value.shape, ( f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" f" {value.shape} for {full_name}" ) if weight_type == "weight": A_ : Optional[int] = value elif weight_type == "weight_g": A_ : Optional[int] = value elif weight_type == "weight_v": A_ : Any = value elif weight_type == "bias": A_ : str = value else: A_ : Any = value logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict ) -> List[str]: A_ : Optional[Any] = [] A_ : Any = fairseq_model.state_dict() A_ : Union[str, Any] = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight A_ : str = None for name, value in fairseq_dict.items(): A_ : Tuple = False if "conv_layers" in name: load_conv_layer( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == "group" , ) A_ : Optional[Any] = True elif name.split("." )[0] == "proj": A_ : Dict = fairseq_model.proj A_ : List[Any] = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: A_ : int = True if "*" in mapped_key: A_ : Optional[Any] = name.split(_lowerCAmelCase )[0].split("." 
)[-2] A_ : int = mapped_key.replace("*" , _lowerCAmelCase ) if "weight_g" in name: A_ : List[Any] = "weight_g" elif "weight_v" in name: A_ : List[Any] = "weight_v" elif "bias" in name: A_ : Dict = "bias" elif "weight" in name: A_ : List[Any] = "weight" else: A_ : Dict = None set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) continue if not is_used: unused_weights.append(_lowerCAmelCase ) logger.warning(f"Unused weights: {unused_weights}" ) return proj_weight def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] ) -> str: A_ : Any = full_name.split("conv_layers." )[-1] A_ : Optional[int] = name.split("." ) A_ : Optional[Any] = int(items[0] ) A_ : Union[str, Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) A_ : List[Any] = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) A_ : int = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) A_ : List[Any] = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." ) A_ : Tuple = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." 
) else: unused_weights.append(_lowerCAmelCase ) def __snake_case ( _lowerCAmelCase : Optional[int] ) -> str: A_ , A_ : List[str] = emb.weight.shape A_ : Optional[int] = nn.Linear(_lowerCAmelCase , _lowerCAmelCase , bias=_lowerCAmelCase ) A_ : List[Any] = emb.weight.data return lin_layer def __snake_case ( _lowerCAmelCase : str ) -> Tuple: with open(_lowerCAmelCase , "r" , encoding="utf-8" ) as f: A_ : int = f.readlines() A_ : Dict = [line.split(" " )[0] for line in lines] A_ : Tuple = len(_lowerCAmelCase ) A_ : Union[str, Any] = { "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, } vocab_dict.update(dict(zip(_lowerCAmelCase , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def __snake_case ( _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict , ) -> Tuple: A_ : Optional[int] = WavaVecaConfig.from_pretrained(_lowerCAmelCase ) A_ : str = SpeechaTextaConfig.from_pretrained( _lowerCAmelCase , vocab_size=_lowerCAmelCase , decoder_layers=_lowerCAmelCase , do_stable_layer_norm=_lowerCAmelCase ) A_ : int = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , ) A_ , A_ , A_ : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) A_ : Union[str, Any] = model[0].eval() # set weights for wav2vec2 encoder A_ : Tuple = WavaVecaModel(_lowerCAmelCase ) A_ : str = recursively_load_weights_wavaveca(model.encoder , _lowerCAmelCase ) A_ : Tuple = SpeechaTextaForCausalLM(_lowerCAmelCase ) A_ , A_ : List[str] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_lowerCAmelCase ) # set output linear layer unexpected_keys.remove("embed_out" ) A_ : Union[str, Any] = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}" ) logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" ) A_ : str = SpeechEncoderDecoderModel(encoder=_lowerCAmelCase , decoder=_lowerCAmelCase ) A_ : Optional[Any] = False # add projection layer A_ : Optional[Any] = nn.Parameter(projection_layer.weight ) A_ : int = nn.Parameter(projection_layer.bias ) A_ : str = create_vocab_dict(_lowerCAmelCase ) with open(os.path.join(_lowerCAmelCase , "vocab.json" ) , "w" ) as fp: json.dump(_lowerCAmelCase , _lowerCAmelCase ) A_ : Any = SpeechaTextaTokenizer(os.path.join(_lowerCAmelCase , "vocab.json" ) ) tokenizer.save_pretrained(_lowerCAmelCase ) A_ : Optional[int] = hf_wavavec.config.to_dict() A_ : int = tokenizer.pad_token_id A_ : List[str] = tokenizer.bos_token_id A_ : List[str] = tokenizer.eos_token_id A_ : List[str] = "speech_to_text_2" A_ : Tuple = "wav2vec2" A_ : str = SpeechEncoderDecoderConfig.from_dict(_lowerCAmelCase ) hf_wavavec.save_pretrained(_lowerCAmelCase ) feature_extractor.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": _lowerCAmelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, 
help='''Path to dict of fine-tuned model''') parser.add_argument( '''--encoder_config_path''', default='''facebook/wav2vec2-large-lv60''', type=str, help='''Path to hf encoder wav2vec2 checkpoint config''', ) parser.add_argument( '''--decoder_config_path''', default='''facebook/s2t-small-mustc-en-fr-st''', type=str, help='''Path to hf decoder s2t checkpoint config''', ) parser.add_argument('''--vocab_size''', default=10_224, type=int, help='''Vocab size of decoder''') parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''') _lowerCAmelCase : Union[str, Any] = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
300
0
import inspect
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
364
import copy import os from typing import TYPE_CHECKING, List, Union if TYPE_CHECKING: pass from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase = logging.get_logger(__name__) lowercase = { """kakaobrain/align-base""": """https://huggingface.co/kakaobrain/align-base/resolve/main/config.json""", } class __lowercase ( A ): '''simple docstring''' _A : int = '''align_text_model''' def __init__( self : Tuple , _a : Tuple=30_522 , _a : str=768 , _a : Tuple=12 , _a : Dict=12 , _a : Any=3_072 , _a : str="gelu" , _a : int=0.1 , _a : Optional[Any]=0.1 , _a : int=512 , _a : List[str]=2 , _a : Any=0.02 , _a : Dict=1E-12 , _a : Tuple=0 , _a : Optional[Any]="absolute" , _a : str=True , **_a : Union[str, Any] , ): super().__init__(**_a ) UpperCamelCase__ = vocab_size UpperCamelCase__ = hidden_size UpperCamelCase__ = num_hidden_layers UpperCamelCase__ = num_attention_heads UpperCamelCase__ = hidden_act UpperCamelCase__ = intermediate_size UpperCamelCase__ = hidden_dropout_prob UpperCamelCase__ = attention_probs_dropout_prob UpperCamelCase__ = max_position_embeddings UpperCamelCase__ = type_vocab_size UpperCamelCase__ = initializer_range UpperCamelCase__ = layer_norm_eps UpperCamelCase__ = position_embedding_type UpperCamelCase__ = use_cache UpperCamelCase__ = pad_token_id @classmethod def A_ ( cls : List[str] , _a : Union[str, os.PathLike] , **_a : Any ): cls._set_token_in_kwargs(_a ) UpperCamelCase__ , UpperCamelCase__ = cls.get_config_dict(_a , **_a ) # get the text config dict if we are loading from AlignConfig if config_dict.get('''model_type''' ) == "align": UpperCamelCase__ = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ F"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(_a , **_a ) class __lowercase ( A ): '''simple docstring''' _A : List[Any] = '''align_vision_model''' def __init__( self : List[str] , _a : int = 3 , _a : int = 600 , _a : float = 2.0 , _a : float = 3.1 , _a : int = 8 , _a : List[int] = [3, 3, 5, 3, 5, 5, 3] , _a : List[int] = [32, 16, 24, 40, 80, 112, 192] , _a : List[int] = [16, 24, 40, 80, 112, 192, 320] , _a : List[int] = [] , _a : List[int] = [1, 2, 2, 2, 1, 2, 1] , _a : List[int] = [1, 2, 2, 3, 3, 4, 1] , _a : List[int] = [1, 6, 6, 6, 6, 6, 6] , _a : float = 0.25 , _a : str = "swish" , _a : int = 2_560 , _a : str = "mean" , _a : float = 0.02 , _a : float = 0.001 , _a : float = 0.99 , _a : float = 0.2 , **_a : List[Any] , ): super().__init__(**_a ) UpperCamelCase__ = num_channels UpperCamelCase__ = image_size UpperCamelCase__ = width_coefficient UpperCamelCase__ = depth_coefficient UpperCamelCase__ = depth_divisor UpperCamelCase__ = kernel_sizes UpperCamelCase__ = in_channels UpperCamelCase__ = out_channels UpperCamelCase__ = depthwise_padding UpperCamelCase__ = strides UpperCamelCase__ = num_block_repeats UpperCamelCase__ = expand_ratios UpperCamelCase__ = squeeze_expansion_ratio UpperCamelCase__ = hidden_act UpperCamelCase__ = hidden_dim UpperCamelCase__ = pooling_type UpperCamelCase__ = initializer_range UpperCamelCase__ = batch_norm_eps UpperCamelCase__ = batch_norm_momentum UpperCamelCase__ = drop_connect_rate UpperCamelCase__ = sum(_a ) * 4 @classmethod def A_ ( cls : Tuple , _a : Union[str, os.PathLike] , **_a : Union[str, Any] ): cls._set_token_in_kwargs(_a ) UpperCamelCase__ , UpperCamelCase__ = cls.get_config_dict(_a , **_a ) # get the vision config dict if we are loading from AlignConfig if config_dict.get('''model_type''' ) == "align": UpperCamelCase__ = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(_a , **_a ) class __lowercase ( A ): '''simple docstring''' _A : List[Any] = '''align''' _A : Optional[int] = True def __init__( self : Optional[int] , _a : Tuple=None , _a : int=None , _a : Any=640 , _a : Optional[Any]=1.0 , _a : Tuple=0.02 , **_a : List[Any] , ): super().__init__(**_a ) if text_config is None: UpperCamelCase__ = {} logger.info('''text_config is None. Initializing the AlignTextConfig with default values.''' ) if vision_config is None: UpperCamelCase__ = {} logger.info('''vision_config is None. Initializing the AlignVisionConfig with default values.''' ) UpperCamelCase__ = AlignTextConfig(**_a ) UpperCamelCase__ = AlignVisionConfig(**_a ) UpperCamelCase__ = projection_dim UpperCamelCase__ = temperature_init_value UpperCamelCase__ = initializer_range @classmethod def A_ ( cls : Optional[int] , _a : AlignTextConfig , _a : AlignVisionConfig , **_a : Optional[Any] ): return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_a ) def A_ ( self : Tuple ): UpperCamelCase__ = copy.deepcopy(self.__dict__ ) UpperCamelCase__ = self.text_config.to_dict() UpperCamelCase__ = self.vision_config.to_dict() UpperCamelCase__ = self.__class__.model_type return output
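# A minimal usage sketch for the composed config above (a sketch, assuming the
# classes are exported as AlignTextConfig / AlignVisionConfig / AlignConfig, as
# in the `transformers` ALIGN implementation):
#
#   text_config = AlignTextConfig()
#   vision_config = AlignVisionConfig()
#   config = AlignConfig.from_text_vision_configs(text_config, vision_config)
#   print(config.projection_dim)  # 640 by default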
35
0
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
198
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, TensorType a__ : Any = logging.get_logger(__name__) a__ : Dict = { '''openai/imagegpt-small''': '''''', '''openai/imagegpt-medium''': '''''', '''openai/imagegpt-large''': '''''', } class a_ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = 'imagegpt' __SCREAMING_SNAKE_CASE : Optional[Any] = ['past_key_values'] __SCREAMING_SNAKE_CASE : Union[str, Any] = { 'hidden_size': 'n_embd', 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self , _lowerCamelCase=512 + 1 , _lowerCamelCase=32 * 32 , _lowerCamelCase=512 , _lowerCamelCase=24 , _lowerCamelCase=8 , _lowerCamelCase=None , _lowerCamelCase="quick_gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=1e-5 , _lowerCamelCase=0.0_2 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=False , **_lowerCamelCase , ) ->str: SCREAMING_SNAKE_CASE : Any = vocab_size SCREAMING_SNAKE_CASE : Optional[int] = n_positions SCREAMING_SNAKE_CASE : Optional[int] = n_embd SCREAMING_SNAKE_CASE : List[Any] = n_layer SCREAMING_SNAKE_CASE : List[Any] = n_head SCREAMING_SNAKE_CASE : int = n_inner SCREAMING_SNAKE_CASE : Dict = activation_function SCREAMING_SNAKE_CASE : Union[str, Any] = resid_pdrop SCREAMING_SNAKE_CASE : Dict = embd_pdrop SCREAMING_SNAKE_CASE : List[str] = attn_pdrop SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_epsilon SCREAMING_SNAKE_CASE : Tuple = initializer_range SCREAMING_SNAKE_CASE : int = scale_attn_weights SCREAMING_SNAKE_CASE : Optional[int] = use_cache SCREAMING_SNAKE_CASE : Optional[Any] = scale_attn_by_inverse_layer_idx SCREAMING_SNAKE_CASE : str = reorder_and_upcast_attn SCREAMING_SNAKE_CASE : List[str] = tie_word_embeddings super().__init__(tie_word_embeddings=_lowerCamelCase , **_lowerCamelCase ) class a_ ( a__ ): """simple docstring""" @property def __lowerCAmelCase ( self ) ->Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''sequence'''}), ] ) def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = 3 , _lowerCamelCase = 32 , _lowerCamelCase = 32 , ) ->Mapping[str, Any]: SCREAMING_SNAKE_CASE : Union[str, Any] = self._generate_dummy_images(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : Tuple = dict(preprocessor(images=_lowerCamelCase , return_tensors=_lowerCamelCase ) ) return inputs
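# A minimal usage sketch for generating dummy ONNX-export inputs (a sketch,
# assuming the classes above are exported as ImageGPTConfig / ImageGPTOnnxConfig;
# the checkpoint name is illustrative):
#
#   from transformers import AutoImageProcessor
#
#   onnx_config = ImageGPTOnnxConfig(ImageGPTConfig())
#   processor = AutoImageProcessor.from_pretrained("openai/imagegpt-small")
#   dummy = onnx_config.generate_dummy_inputs(processor, batch_size=1, framework="pt")
#   print(dummy["input_ids"].shape)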
313
0
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf _lowerCamelCase = logging.get_logger(__name__) @dataclass class a ( lowercase_ ): '''simple docstring''' lowerCAmelCase : List[str] = [ "no_inference", "no_cuda", "no_tpu", "no_speed", "no_memory", "no_env_print", "no_multi_process", ] def __init__( self : Dict , **__snake_case : List[str] ): for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: UpperCAmelCase_ = deprecated_arg[3:] UpperCAmelCase_ = not kwargs.pop(a__ ) logger.warning( F'{deprecated_arg} is depreciated. Please use --no-{positive_arg} or' F' {positive_arg}={kwargs[positive_arg]}' ) UpperCAmelCase_ = kwargs.pop('''tpu_name''' , self.tpu_name ) UpperCAmelCase_ = kwargs.pop('''device_idx''' , self.device_idx ) UpperCAmelCase_ = kwargs.pop('''eager_mode''' , self.eager_mode ) UpperCAmelCase_ = kwargs.pop('''use_xla''' , self.use_xla ) super().__init__(**a__ ) lowerCAmelCase : str = field( default=lowercase_ , metadata={'help': 'Name of TPU'} , ) lowerCAmelCase : int = field( default=0 , metadata={'help': 'CPU / GPU device index. Defaults to 0.'} , ) lowerCAmelCase : bool = field(default=lowercase_ , metadata={'help': 'Benchmark models in eager model.'} ) lowerCAmelCase : bool = field( default=lowercase_ , metadata={ 'help': 'Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.' } , ) @cached_property def lowerCamelCase_ ( self : List[str] ): requires_backends(self , ['''tf'''] ) UpperCAmelCase_ = None if self.tpu: try: if self.tpu_name: UpperCAmelCase_ = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: UpperCAmelCase_ = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: UpperCAmelCase_ = None return tpu @cached_property def lowerCamelCase_ ( self : Any ): requires_backends(self , ['''tf'''] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) UpperCAmelCase_ = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , '''GPU''' ) UpperCAmelCase_ = tf.distribute.OneDeviceStrategy(device=F'/gpu:{self.device_idx}' ) else: tf.config.set_visible_devices([] , '''GPU''' ) # disable GPU UpperCAmelCase_ = tf.distribute.OneDeviceStrategy(device=F'/cpu:{self.device_idx}' ) return strategy @property def lowerCamelCase_ ( self : Dict ): requires_backends(self , ['''tf'''] ) return self._setup_tpu is not None @property def lowerCamelCase_ ( self : Any ): requires_backends(self , ['''tf'''] ) return self._setup_strategy @property def lowerCamelCase_ ( self : Optional[Any] ): requires_backends(self , ['''tf'''] ) return tf.config.list_physical_devices('''GPU''' ) @property def lowerCamelCase_ ( self : Optional[int] ): requires_backends(self , ['''tf'''] ) if self.cuda: return len(self.gpu_list ) return 0 @property def lowerCamelCase_ ( self : List[Any] ): return self.n_gpu > 0
354
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate


@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # the dataclass is frozen, so write through __dict__
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
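# A minimal usage sketch (a sketch; assumes a dataset whose features contain an
# "audio" column and a ClassLabel "labels" column — the label names are
# illustrative):
#
#   from datasets import Audio, ClassLabel, Features
#
#   features = Features({"audio": Audio(), "labels": ClassLabel(names=["cat", "dog"])})
#   template = AudioClassification(audio_column="audio", label_column="labels")
#   template = template.align_with_features(features)
#   print(template.column_mapping)  # {'audio': 'audio', 'labels': 'labels'}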
177
0
def heaps(arr: list) -> list:
    """
    Pure python implementation of the iterative Heap's algorithm,
    returning all permutations of a list.
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    # swap the first and the i-th element
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    # swap the c[i]-th and the i-th element
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
176
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { """facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""", """facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""", """facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""", """facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""", """facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""", """facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""", """facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""", """facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""", """facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""", } class lowercase__ ( _UpperCAmelCase ): A__ : int ="""xmod""" def __init__( self : Optional[Any] , UpperCAmelCase_ : Optional[int]=30522 , UpperCAmelCase_ : Union[str, Any]=768 , UpperCAmelCase_ : List[str]=12 , UpperCAmelCase_ : List[str]=12 , UpperCAmelCase_ : List[str]=3072 , UpperCAmelCase_ : Dict="gelu" , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Optional[Any]=512 , UpperCAmelCase_ : Any=2 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : Optional[Any]=1e-1_2 , UpperCAmelCase_ : Any=1 , UpperCAmelCase_ : Dict=0 , UpperCAmelCase_ : Optional[int]=2 , UpperCAmelCase_ : Tuple="absolute" , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Union[str, Any]=("en_XX",) , UpperCAmelCase_ : int=None , **UpperCAmelCase_ : str , ): super().__init__(pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = vocab_size SCREAMING_SNAKE_CASE__ = hidden_size SCREAMING_SNAKE_CASE__ = num_hidden_layers SCREAMING_SNAKE_CASE__ = num_attention_heads SCREAMING_SNAKE_CASE__ = hidden_act SCREAMING_SNAKE_CASE__ = intermediate_size SCREAMING_SNAKE_CASE__ = hidden_dropout_prob SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ = max_position_embeddings SCREAMING_SNAKE_CASE__ = type_vocab_size SCREAMING_SNAKE_CASE__ = initializer_range SCREAMING_SNAKE_CASE__ = layer_norm_eps SCREAMING_SNAKE_CASE__ = position_embedding_type SCREAMING_SNAKE_CASE__ = use_cache SCREAMING_SNAKE_CASE__ = classifier_dropout SCREAMING_SNAKE_CASE__ = pre_norm SCREAMING_SNAKE_CASE__ = adapter_reduction_factor SCREAMING_SNAKE_CASE__ = adapter_layer_norm SCREAMING_SNAKE_CASE__ = adapter_reuse_layer_norm SCREAMING_SNAKE_CASE__ = ln_before_adapter SCREAMING_SNAKE_CASE__ = list(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = default_language class lowercase__ ( _UpperCAmelCase ): @property def A_ ( self : List[Any] ): if self.task == "multiple-choice": SCREAMING_SNAKE_CASE__ = {0: 'batch', 1: 'choice', 2: 'sequence'} else: SCREAMING_SNAKE_CASE__ = {0: 
'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
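# A minimal usage sketch (a sketch, assuming the class above is exported as
# XmodConfig, as in the `transformers` X-MOD implementation; parameter names
# follow the signature above and the language codes are illustrative):
#
#   config = XmodConfig(languages=("en_XX", "de_DE"), default_language="en_XX")
#   print(config.adapter_reduction_factor)  # 2 by default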
176
1
from __future__ import annotations

import typing
from collections import Counter


def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """
    Returns a Counter keyed by perimeter, counting the integer-sided
    Pythagorean triples whose perimeter does not exceed max_perimeter.
    """
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1000) -> int:
    """
    Returns the perimeter with the maximum number of solutions.
    """
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]


if __name__ == "__main__":
    print(f"Perimeter {solution()} has maximum solutions")
325
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
325
1
import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_filelock_with_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
340
from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax a_ = logging.get_logger(__name__) @add_end_docstrings(_UpperCAmelCase ) class lowercase__ ( _UpperCAmelCase ): def __init__( self , **__UpperCAmelCase )-> List[str]: '''simple docstring''' super().__init__(**__UpperCAmelCase ) requires_backends(self , "vision" ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == "tf" else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self , __UpperCAmelCase , **__UpperCAmelCase )-> int: '''simple docstring''' return super().__call__(__UpperCAmelCase , **__UpperCAmelCase ) def UpperCAmelCase ( self , **__UpperCAmelCase )-> List[str]: '''simple docstring''' lowerCAmelCase__ = {} if "candidate_labels" in kwargs: lowerCAmelCase__ = kwargs["candidate_labels"] if "hypothesis_template" in kwargs: lowerCAmelCase__ = kwargs["hypothesis_template"] return preprocess_params, {}, {} def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase="This is a photo of {}." )-> Optional[int]: '''simple docstring''' lowerCAmelCase__ = load_image(__UpperCAmelCase ) lowerCAmelCase__ = self.image_processor(images=[image] , return_tensors=self.framework ) lowerCAmelCase__ = candidate_labels lowerCAmelCase__ = [hypothesis_template.format(__UpperCAmelCase ) for x in candidate_labels] lowerCAmelCase__ = self.tokenizer(__UpperCAmelCase , return_tensors=self.framework , padding=__UpperCAmelCase ) lowerCAmelCase__ = [text_inputs] return inputs def UpperCAmelCase ( self , __UpperCAmelCase )-> int: '''simple docstring''' lowerCAmelCase__ = model_inputs.pop("candidate_labels" ) lowerCAmelCase__ = model_inputs.pop("text_inputs" ) if isinstance(text_inputs[0] , __UpperCAmelCase ): lowerCAmelCase__ = text_inputs[0] else: # Batching case. lowerCAmelCase__ = text_inputs[0][0] lowerCAmelCase__ = self.model(**__UpperCAmelCase , **__UpperCAmelCase ) lowerCAmelCase__ = { "candidate_labels": candidate_labels, "logits": outputs.logits_per_image, } return model_outputs def UpperCAmelCase ( self , __UpperCAmelCase )-> Tuple: '''simple docstring''' lowerCAmelCase__ = model_outputs.pop("candidate_labels" ) lowerCAmelCase__ = model_outputs["logits"][0] if self.framework == "pt": lowerCAmelCase__ = logits.softmax(dim=-1 ).squeeze(-1 ) lowerCAmelCase__ = probs.tolist() if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ = [scores] elif self.framework == "tf": lowerCAmelCase__ = stable_softmax(__UpperCAmelCase , axis=-1 ) lowerCAmelCase__ = probs.numpy().tolist() else: raise ValueError(F"Unsupported framework: {self.framework}" ) lowerCAmelCase__ = [ {"score": score, "label": candidate_label} for score, candidate_label in sorted(zip(__UpperCAmelCase , __UpperCAmelCase ) , key=lambda __UpperCAmelCase : -x[0] ) ] return result
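# A minimal usage sketch via the high-level `pipeline` API (the checkpoint name
# is illustrative; any CLIP-style zero-shot checkpoint works):
#
#   from transformers import pipeline
#
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   result = classifier(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["two cats", "a dog", "an airplane"],
#   )
#   print(result[0]["label"], result[0]["score"])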
340
1
"""simple docstring""" import unittest from transformers import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device if is_torch_available(): import torch from transformers import AutoModelForImageClassification if is_vision_available(): from transformers import AutoImageProcessor @require_torch @require_vision class _SCREAMING_SNAKE_CASE ( unittest.TestCase): @slow def _snake_case ( self )-> Optional[Any]: lowerCamelCase_ =AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" ) lowerCamelCase_ =AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" ) model.to(__lowerCAmelCase ) from datasets import load_dataset lowerCamelCase_ =load_dataset("""nielsr/rvlcdip-demo""" ) lowerCamelCase_ =dataset["""train"""][0]["""image"""].convert("""RGB""" ) lowerCamelCase_ =image_processor(__lowerCAmelCase , return_tensors="""pt""" ).to(__lowerCAmelCase ) # forward pass with torch.no_grad(): lowerCamelCase_ =model(**__lowerCAmelCase ) lowerCamelCase_ =outputs.logits lowerCamelCase_ =torch.Size((1, 16) ) self.assertEqual(logits.shape , __lowerCAmelCase ) lowerCamelCase_ =torch.tensor( [-0.4_1_5_8, -0.4_0_9_2, -0.4_3_4_7] , device=__lowerCAmelCase , dtype=torch.float , ) self.assertTrue(torch.allclose(logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) )
370
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    """
    Jacobi Iteration Method: an iterative algorithm to determine the solutions of
    a strictly diagonally dominant system of linear equations.
    """
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)

    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """
    Checks if the given matrix is strictly diagonally dominant.
    """
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
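# A minimal usage sketch (the system below is strictly diagonally dominant; the
# numbers are illustrative):
#
#   import numpy as np
#
#   coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
#   constant = np.array([[2.0], [-6.0], [-4.0]])
#   print(jacobi_iteration_method(coefficient, constant, init_val=[0.5, -0.5, -0.5], iterations=3))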
49
0
def is_automorphic_number(number: int) -> bool:
    """
    Checks whether a number is automorphic, i.e. whether its square
    ends in the same digits as the number itself.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
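# Quick sanity checks (76**2 = 5776 ends in 76, so 76 is automorphic;
# 7**2 = 49 does not end in 7):
#
#   assert is_automorphic_number(76)
#   assert not is_automorphic_number(7)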
188
'''simple docstring'''
from unittest import TestCase

from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset


class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
229
0
from __future__ import annotations

import string
from itertools import cycle, product
from pathlib import Path

VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}

COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded: str = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data: str = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")

    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)


if __name__ == "__main__":
    print(f"{solution() = }")
362
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    """
    The Chudnovsky algorithm is a fast method for calculating the digits of pi,
    based on Ramanujan's pi formulae.
    """
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
88
0
"""simple docstring""" import argparse import torch from ...utils import logging from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert logging.set_verbosity_info() def lowercase ( _snake_case : Union[str, Any] , _snake_case : Dict , _snake_case : Union[str, Any] ) ->Optional[Any]: """simple docstring""" __snake_case : Optional[Any] = AlbertConfig.from_json_file(_snake_case ) print(f"""Building PyTorch model from configuration: {config}""" ) __snake_case : Tuple = AlbertForPreTraining(_snake_case ) # Load weights from tf checkpoint load_tf_weights_in_albert(_snake_case , _snake_case , _snake_case ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , _snake_case ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--albert_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained ALBERT model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
102
import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any]=False ) -> str: '''simple docstring''' try: _UpperCAmelCase = os.environ[key] except KeyError: # KEY isn't set, default to `default`. _UpperCAmelCase = default else: # KEY is set, convert it to True or False. try: _UpperCAmelCase = strtobool(_UpperCAmelCase ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F"If set, {key} must be yes or no." ) return _value UpperCAmelCase__ = parse_flag_from_env("RUN_SLOW", default=False) def A ( _UpperCAmelCase : List[str] ) -> List[str]: '''simple docstring''' return unittest.skip('Test was skipped' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Dict ) -> str: '''simple docstring''' return unittest.skipUnless(_run_slow_tests , 'test is slow' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Any ) -> str: '''simple docstring''' return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Dict ) -> Dict: '''simple docstring''' return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[Any] ) -> List[Any]: '''simple docstring''' return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[int] ) -> List[str]: '''simple docstring''' return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Union[str, Any] ) -> List[Any]: '''simple docstring''' return unittest.skipUnless( is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : str ) -> str: '''simple docstring''' return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Union[str, Any] ) -> List[Any]: '''simple docstring''' return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[Any] ) -> str: '''simple docstring''' return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Tuple ) -> int: '''simple docstring''' return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Any ) -> Optional[int]: '''simple docstring''' return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Tuple ) -> Any: '''simple docstring''' return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Any ) -> Optional[int]: '''simple docstring''' return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' 
)(_UpperCAmelCase ) def A ( _UpperCAmelCase : List[Any] ) -> Dict: '''simple docstring''' return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Optional[int] ) -> str: '''simple docstring''' return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Any=None , _UpperCAmelCase : List[Any]=None ) -> Dict: '''simple docstring''' if test_case is None: return partial(_UpperCAmelCase , version=_UpperCAmelCase ) return unittest.skipUnless(is_torch_version('>=' , _UpperCAmelCase ) , F"test requires torch version >= {version}" )(_UpperCAmelCase ) def A ( _UpperCAmelCase : List[str] ) -> int: '''simple docstring''' return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(_UpperCAmelCase ) def A ( _UpperCAmelCase : List[str] ) -> Optional[int]: '''simple docstring''' return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(_UpperCAmelCase ) UpperCAmelCase__ = ( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def A ( _UpperCAmelCase : List[str] ) -> Any: '''simple docstring''' return unittest.skipUnless( _atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(_UpperCAmelCase ) class __lowerCAmelCase ( unittest.TestCase ): UpperCamelCase = True @classmethod def _lowerCamelCase ( cls : List[Any]) -> Tuple: """simple docstring""" _UpperCAmelCase = tempfile.mkdtemp() @classmethod def _lowerCamelCase ( cls : Union[str, Any]) -> str: """simple docstring""" if os.path.exists(cls.tmpdir): shutil.rmtree(cls.tmpdir) def _lowerCamelCase ( self : List[str]) -> List[Any]: """simple docstring""" if self.clear_on_setup: for path in Path(self.tmpdir).glob('**/*'): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(A) class __lowerCAmelCase ( unittest.TestCase ): def _lowerCamelCase ( self : Dict) -> Tuple: """simple docstring""" super().tearDown() # Reset the state of the AcceleratorState singleton. 
AcceleratorState._reset_state() PartialState._reset_state() class __lowerCAmelCase ( unittest.TestCase ): def _lowerCamelCase ( self : Optional[int] , A : Union[mock.Mock, List[mock.Mock]]) -> Tuple: """simple docstring""" _UpperCAmelCase = mocks if isinstance(A , (tuple, list)) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop) def A ( _UpperCAmelCase : List[Any] ) -> int: '''simple docstring''' _UpperCAmelCase = AcceleratorState() _UpperCAmelCase = tensor[None].clone().to(state.device ) _UpperCAmelCase = gather(_UpperCAmelCase ).cpu() _UpperCAmelCase = tensor[0].cpu() for i in range(tensors.shape[0] ): if not torch.equal(tensors[i] , _UpperCAmelCase ): return False return True class __lowerCAmelCase : def __init__( self : Optional[Any] , A : Union[str, Any] , A : Optional[int] , A : str) -> Optional[int]: """simple docstring""" _UpperCAmelCase = returncode _UpperCAmelCase = stdout _UpperCAmelCase = stderr async def A ( _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] ) -> Optional[Any]: '''simple docstring''' while True: _UpperCAmelCase = await stream.readline() if line: callback(_UpperCAmelCase ) else: break async def A ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : str=None , _UpperCAmelCase : str=None , _UpperCAmelCase : Dict=False , _UpperCAmelCase : Union[str, Any]=False ) -> _RunOutput: '''simple docstring''' if echo: print('\nRunning: ' , ' '.join(_UpperCAmelCase ) ) _UpperCAmelCase = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=_UpperCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_UpperCAmelCase , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) _UpperCAmelCase = [] _UpperCAmelCase = [] def tee(_UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : str="" ): _UpperCAmelCase = line.decode('utf-8' ).rstrip() sink.append(_UpperCAmelCase ) if not quiet: print(_UpperCAmelCase , _UpperCAmelCase , file=_UpperCAmelCase ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda _UpperCAmelCase : tee(_UpperCAmelCase , _UpperCAmelCase , sys.stdout , label='stdout:' ) ) ), asyncio.create_task(_read_stream(p.stderr , lambda _UpperCAmelCase : tee(_UpperCAmelCase , _UpperCAmelCase , sys.stderr , label='stderr:' ) ) ), ] , timeout=_UpperCAmelCase , ) return _RunOutput(await p.wait() , _UpperCAmelCase , _UpperCAmelCase ) def A ( _UpperCAmelCase : str , _UpperCAmelCase : Dict=None , _UpperCAmelCase : str=None , _UpperCAmelCase : str=180 , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : List[Any]=True ) -> _RunOutput: '''simple docstring''' _UpperCAmelCase = asyncio.get_event_loop() _UpperCAmelCase = loop.run_until_complete( _stream_subprocess(_UpperCAmelCase , env=_UpperCAmelCase , stdin=_UpperCAmelCase , timeout=_UpperCAmelCase , quiet=_UpperCAmelCase , echo=_UpperCAmelCase ) ) _UpperCAmelCase = ' '.join(_UpperCAmelCase ) if result.returncode > 0: _UpperCAmelCase = '\n'.join(result.stderr ) raise RuntimeError( F"'{cmd_str}' failed with returncode {result.returncode}\n\n" F"The combined stderr from workers follows:\n{stderr}" ) return result class __lowerCAmelCase ( A ): pass def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : str=False ) -> Tuple: '''simple docstring''' try: _UpperCAmelCase = subprocess.check_output(_UpperCAmelCase , stderr=subprocess.STDOUT ) if return_stdout: if hasattr(_UpperCAmelCase , 'decode' ): _UpperCAmelCase = output.decode('utf-8' ) return output except subprocess.CalledProcessError as e: raise SubprocessCallException( F"Command `{' '.join(_UpperCAmelCase )}` failed with the following error:\n\n{e.output.decode()}" ) from e
339
0
import random


def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """
    Generate a random graph as an adjacency list.
    :param vertices_number: number of vertices
    :param probability: probability that an edge exists between two vertices
    :param directed: if True, the graph will be directed,
                     otherwise it will be undirected
    """
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is lower than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i, either
                    graph[j].append(i)

    return graph


def complete_graph(vertices_number: int) -> dict:
    """
    Generate a complete graph with vertices_number vertices.
    """
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }


if __name__ == "__main__":
    import doctest

    doctest.testmod()
250
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_blenderbot_small": [
        "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotSmallConfig",
        "BlenderbotSmallOnnxConfig",
    ],
    "tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot_small"] = [
        "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotSmallForCausalLM",
        "BlenderbotSmallForConditionalGeneration",
        "BlenderbotSmallModel",
        "BlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
        "TFBlenderbotSmallForConditionalGeneration",
        "TFBlenderbotSmallModel",
        "TFBlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
        "FlaxBlenderbotSmallForConditionalGeneration",
        "FlaxBlenderbotSmallModel",
        "FlaxBlenderbotSmallPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_blenderbot_small import (
        BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotSmallConfig,
        BlenderbotSmallOnnxConfig,
    )
    from .tokenization_blenderbot_small import BlenderbotSmallTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot_small import (
            BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotSmallForCausalLM,
            BlenderbotSmallForConditionalGeneration,
            BlenderbotSmallModel,
            BlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot_small import (
            TFBlenderbotSmallForConditionalGeneration,
            TFBlenderbotSmallModel,
            TFBlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot_small import (
            FlaxBlenderbotSmallForConditionalGeneration,
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
250
1
'''simple docstring'''
def partition(m: int) -> int:
    # memo[n][k] accumulates the partition counts via the standard recurrence
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
37
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = { '''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''', } class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Optional[Any] = '''focalnet''' def __init__( self ,__UpperCAmelCase=224 ,__UpperCAmelCase=4 ,__UpperCAmelCase=3 ,__UpperCAmelCase=96 ,__UpperCAmelCase=False ,__UpperCAmelCase=[192, 384, 768, 768] ,__UpperCAmelCase=[2, 2, 6, 2] ,__UpperCAmelCase=[2, 2, 2, 2] ,__UpperCAmelCase=[3, 3, 3, 3] ,__UpperCAmelCase="gelu" ,__UpperCAmelCase=4.0 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=False ,__UpperCAmelCase=1E-4 ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase=1E-5 ,__UpperCAmelCase=32 ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,**__UpperCAmelCase ,) -> Optional[Any]: super().__init__(**__UpperCAmelCase ) lowerCAmelCase__ : Dict = image_size lowerCAmelCase__ : int = patch_size lowerCAmelCase__ : str = num_channels lowerCAmelCase__ : Dict = embed_dim lowerCAmelCase__ : List[str] = use_conv_embed lowerCAmelCase__ : List[Any] = hidden_sizes lowerCAmelCase__ : Dict = depths lowerCAmelCase__ : List[str] = focal_levels lowerCAmelCase__ : List[str] = focal_windows lowerCAmelCase__ : Dict = hidden_act lowerCAmelCase__ : Dict = mlp_ratio lowerCAmelCase__ : Tuple = hidden_dropout_prob lowerCAmelCase__ : Tuple = drop_path_rate lowerCAmelCase__ : Dict = use_layerscale lowerCAmelCase__ : Optional[Any] = layerscale_value lowerCAmelCase__ : str = use_post_layernorm lowerCAmelCase__ : Union[str, Any] = use_post_layernorm_in_modulation lowerCAmelCase__ : int = normalize_modulator lowerCAmelCase__ : Optional[Any] = initializer_range lowerCAmelCase__ : List[str] = layer_norm_eps lowerCAmelCase__ : List[Any] = encoder_stride lowerCAmelCase__ : Dict = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 ,len(self.depths ) + 1 )] lowerCAmelCase__ , lowerCAmelCase__ : Any = get_aligned_output_features_output_indices( out_features=__UpperCAmelCase ,out_indices=__UpperCAmelCase ,stage_names=self.stage_names )
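# A minimal usage sketch (a sketch, assuming the class above is exported as
# FocalNetConfig, as in the `transformers` FocalNet implementation):
#
#   config = FocalNetConfig(image_size=224, embed_dim=96, depths=[2, 2, 6, 2])
#   print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']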
37
1
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder


@dataclass
class AutoencoderKLOutput(BaseOutput):
    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        self.use_tiling = use_tiling

    def disable_tiling(self):
        self.enable_tiling(False)

    def enable_slicing(self):
        self.use_slicing = True

    def disable_slicing(self):
        self.use_slicing = False

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)

    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b

    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
361
import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class __lowercase ( unittest.TestCase ): """simple docstring""" def _SCREAMING_SNAKE_CASE ( self : int): SCREAMING_SNAKE_CASE_: str = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") SCREAMING_SNAKE_CASE_: str = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: str = -1 SCREAMING_SNAKE_CASE_: Optional[int] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Any = model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[str] = tokenizer.decode(greedy_ids[0]) with CaptureStdout() as cs: SCREAMING_SNAKE_CASE_: int = TextStreamer(lowerCAmelCase__) model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__ , streamer=lowerCAmelCase__) # The greedy text should be printed to stdout, except for the final "\n" in the streamer SCREAMING_SNAKE_CASE_: Union[str, Any] = cs.out[:-1] self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : int): SCREAMING_SNAKE_CASE_: List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") SCREAMING_SNAKE_CASE_: int = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[Any] = -1 SCREAMING_SNAKE_CASE_: int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: int = model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Tuple = tokenizer.decode(greedy_ids[0]) SCREAMING_SNAKE_CASE_: int = TextIteratorStreamer(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} SCREAMING_SNAKE_CASE_: Tuple = Thread(target=model.generate , kwargs=lowerCAmelCase__) thread.start() SCREAMING_SNAKE_CASE_: Optional[Any] = "" for new_text in streamer: streamer_text += new_text self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): SCREAMING_SNAKE_CASE_: int = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") SCREAMING_SNAKE_CASE_: int = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[Any] = -1 SCREAMING_SNAKE_CASE_: Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Dict = model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Tuple = greedy_ids[:, input_ids.shape[1] :] SCREAMING_SNAKE_CASE_: Union[str, Any] = tokenizer.decode(new_greedy_ids[0]) with CaptureStdout() as cs: SCREAMING_SNAKE_CASE_: Dict = TextStreamer(lowerCAmelCase__ , skip_prompt=lowerCAmelCase__) model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__ , streamer=lowerCAmelCase__) # The greedy text should be printed to stdout, except for the final "\n" in the streamer SCREAMING_SNAKE_CASE_: Any = cs.out[:-1] 
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Tuple): # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them SCREAMING_SNAKE_CASE_: Tuple = AutoTokenizer.from_pretrained("distilgpt2") SCREAMING_SNAKE_CASE_: List[str] = AutoModelForCausalLM.from_pretrained("distilgpt2").to(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[Any] = -1 SCREAMING_SNAKE_CASE_: List[str] = torch.ones((1, 5) , device=lowerCAmelCase__).long() * model.config.bos_token_id with CaptureStdout() as cs: SCREAMING_SNAKE_CASE_: Union[str, Any] = TextStreamer(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__) model.generate(lowerCAmelCase__ , max_new_tokens=1 , do_sample=lowerCAmelCase__ , streamer=lowerCAmelCase__) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token SCREAMING_SNAKE_CASE_: str = cs.out[:-1] # Remove the final "\n" SCREAMING_SNAKE_CASE_: Tuple = tokenizer(lowerCAmelCase__ , return_tensors="pt") self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1)) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): SCREAMING_SNAKE_CASE_: List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") SCREAMING_SNAKE_CASE_: List[str] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Dict = -1 SCREAMING_SNAKE_CASE_: List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Any = TextIteratorStreamer(lowerCAmelCase__ , timeout=0.001) SCREAMING_SNAKE_CASE_: Any = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} SCREAMING_SNAKE_CASE_: Optional[Any] = Thread(target=model.generate , kwargs=lowerCAmelCase__) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(lowerCAmelCase__): SCREAMING_SNAKE_CASE_: Tuple = "" for new_text in streamer: streamer_text += new_text
127
0
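The `blend_v`/`blend_h` helpers in the autoencoder record above crossfade overlapping tile edges linearly. Below is a self-contained sketch of the horizontal case; tensor shapes are invented for illustration and only `torch` is assumed.

import torch

def blend_h(a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
    """Linearly crossfade the right edge of tile `a` into the left edge of tile `b`."""
    blend_extent = min(a.shape[3], b.shape[3], blend_extent)
    for x in range(blend_extent):
        b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
    return b

left = torch.zeros(1, 3, 4, 8)
right = torch.ones(1, 3, 4, 8)
blended = blend_h(left, right, blend_extent=4)
# The first row ramps 0.00, 0.25, 0.50, 0.75 across the blended columns, then stays at 1.0.
print(blended[0, 0, 0])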
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
0
'''simple docstring''' import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) UpperCAmelCase = '''\ Text data. Second line of data.''' UpperCAmelCase = '''file''' @pytest.fixture(scope='session' ) def __UpperCamelCase ( lowercase__ : List[Any] ): '''simple docstring''' __lowercase =tmp_path_factory.mktemp('data' ) / (FILE_PATH + '.zstd') __lowercase =bytes(lowercase__, 'utf-8' ) with zstd.open(lowercase__, 'wb' ) as f: f.write(lowercase__ ) return path @pytest.fixture def __UpperCamelCase ( lowercase__ : str ): '''simple docstring''' with open(os.path.join(tmpfs.local_root_dir, lowercase__ ), 'w' ) as f: f.write(lowercase__ ) return FILE_PATH @pytest.mark.parametrize('compression_format', ['gzip', 'xz', 'zstd'] ) def __UpperCamelCase ( lowercase__ : Any, lowercase__ : List[str], lowercase__ : Optional[int], lowercase__ : str, lowercase__ : int, lowercase__ : Dict ): '''simple docstring''' __lowercase ={'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_path} __lowercase =input_paths[compression_format] __lowercase =tmp_path / 'cache' __lowercase =DownloadConfig(cache_dir=lowercase__, extract_compressed_file=lowercase__ ) __lowercase =cached_path(lowercase__, download_config=lowercase__ ) with open(lowercase__ ) as f: __lowercase =f.read() with open(lowercase__ ) as f: __lowercase =f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize('default_extracted', [True, False] ) @pytest.mark.parametrize('default_cache_dir', [True, False] ) def __UpperCamelCase ( lowercase__ : Union[str, Any], lowercase__ : Tuple, lowercase__ : int, lowercase__ : int, lowercase__ : Optional[int] ): '''simple docstring''' __lowercase ='custom_cache' __lowercase ='custom_extracted_dir' __lowercase =tmp_path / 'custom_extracted_path' if default_extracted: __lowercase =('downloads' if default_cache_dir else custom_cache_dir, 'extracted') else: monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR', lowercase__ ) monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH', str(lowercase__ ) ) __lowercase =custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) __lowercase =xz_file __lowercase =( DownloadConfig(extract_compressed_file=lowercase__ ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=lowercase__ ) ) __lowercase =cached_path(lowercase__, download_config=lowercase__ ) assert Path(lowercase__ ).parent.parts[-2:] == expected def __UpperCamelCase ( lowercase__ : List[Any] ): '''simple docstring''' __lowercase =str(Path(lowercase__ ).resolve() ) assert cached_path(lowercase__ ) == text_file # relative path __lowercase =str(Path(lowercase__ ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(lowercase__ ) == text_file def __UpperCamelCase ( lowercase__ : Optional[Any] ): '''simple docstring''' __lowercase =str(tmp_path.resolve() / '__missing_file__.txt' ) with pytest.raises(lowercase__ ): cached_path(lowercase__ ) # relative path __lowercase ='./__missing_file__.txt' with pytest.raises(lowercase__ ): cached_path(lowercase__ ) def __UpperCamelCase ( lowercase__ : List[str] ): '''simple docstring''' __lowercase =get_from_cache(F'''tmp://{tmpfs_file}''' ) with open(lowercase__ ) as f: __lowercase =f.read() 
assert output_file_content == FILE_CONTENT @patch('datasets.config.HF_DATASETS_OFFLINE', lowercase__ ) def __UpperCamelCase ( ): '''simple docstring''' with pytest.raises(lowercase__ ): cached_path('https://huggingface.co' ) @patch('datasets.config.HF_DATASETS_OFFLINE', lowercase__ ) def __UpperCamelCase ( lowercase__ : Any ): '''simple docstring''' __lowercase =tmp_path_factory.mktemp('data' ) / 'file.html' with pytest.raises(lowercase__ ): http_get('https://huggingface.co', temp_file=lowercase__ ) with pytest.raises(lowercase__ ): http_head('https://huggingface.co' ) @patch('datasets.config.HF_DATASETS_OFFLINE', lowercase__ ) def __UpperCamelCase ( lowercase__ : Optional[int] ): '''simple docstring''' __lowercase =tmp_path_factory.mktemp('data' ) / 'file.html' with pytest.raises(lowercase__ ): ftp_get('ftp://huggingface.co', temp_file=lowercase__ ) with pytest.raises(lowercase__ ): ftp_head('ftp://huggingface.co' ) @patch('datasets.config.HF_DATASETS_OFFLINE', lowercase__ ) def __UpperCamelCase ( lowercase__ : Any ): '''simple docstring''' __lowercase =tmp_path_factory.mktemp('data' ) / 'file.html' with pytest.raises(lowercase__ ): fsspec_get('s3://huggingface.co', temp_file=lowercase__ ) with pytest.raises(lowercase__ ): fsspec_head('s3://huggingface.co' )
141
0
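Hypothetical call sites for the `deprecate` helper above. Names, versions, and messages are invented for illustration, and the sketch assumes the package version is still below the removal version passed in (otherwise the helper raises).

# Pop a deprecated kwarg from a kwargs dict, emit the warning, and recover its value:
kwargs = {"width": 512}
width = deprecate("width", "1.0.0", "Pass `size` instead of `width`.", take_from=kwargs)
assert width == 512 and "width" not in kwargs

# With no `take_from`, only the warning is emitted and nothing is returned:
deprecate("set_format", "1.0.0", "Use `to(...)` instead.")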
from typing import Any, Dict, Optional import torch import torch.nn.functional as F from torch import nn from ..utils import maybe_allow_in_graph from .activations import get_activation from .attention_processor import Attention from .embeddings import CombinedTimestepLabelEmbeddings @maybe_allow_in_graph class SCREAMING_SNAKE_CASE__ ( nn.Module ): def __init__(self : Tuple , a__ : int , a__ : int , a__ : int , a__ : List[Any]=0.0 , a__ : Optional[int] = None , a__ : str = "geglu" , a__ : Optional[int] = None , a__ : bool = False , a__ : bool = False , a__ : bool = False , a__ : bool = False , a__ : bool = True , a__ : str = "layer_norm" , a__ : bool = False , ): """simple docstring""" super().__init__() __snake_case = only_cross_attention __snake_case = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm_zero''' __snake_case = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm''' if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( f"""`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to""" f""" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.""" ) # Define 3 blocks. Each block has its own normalization layer. # 1. Self-Attn if self.use_ada_layer_norm: __snake_case = AdaLayerNorm(a__ , a__ ) elif self.use_ada_layer_norm_zero: __snake_case = AdaLayerNormZero(a__ , a__ ) else: __snake_case = nn.LayerNorm(a__ , elementwise_affine=a__ ) __snake_case = Attention( query_dim=a__ , heads=a__ , dim_head=a__ , dropout=a__ , bias=a__ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=a__ , ) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. __snake_case = ( AdaLayerNorm(a__ , a__ ) if self.use_ada_layer_norm else nn.LayerNorm(a__ , elementwise_affine=a__ ) ) __snake_case = Attention( query_dim=a__ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=a__ , dim_head=a__ , dropout=a__ , bias=a__ , upcast_attention=a__ , ) # is self-attn if encoder_hidden_states is none else: __snake_case = None __snake_case = None # 3. 
Feed-forward __snake_case = nn.LayerNorm(a__ , elementwise_affine=a__ ) __snake_case = FeedForward(a__ , dropout=a__ , activation_fn=a__ , final_dropout=a__ ) # let chunk size default to None __snake_case = None __snake_case = 0 def a (self : Tuple , a__ : Optional[int] , a__ : int ): """simple docstring""" __snake_case = chunk_size __snake_case = dim def a (self : str , a__ : torch.FloatTensor , a__ : Optional[torch.FloatTensor] = None , a__ : Optional[torch.FloatTensor] = None , a__ : Optional[torch.FloatTensor] = None , a__ : Optional[torch.LongTensor] = None , a__ : Dict[str, Any] = None , a__ : Optional[torch.LongTensor] = None , ): """simple docstring""" if self.use_ada_layer_norm: __snake_case = self.norma(a__ , a__ ) elif self.use_ada_layer_norm_zero: __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = self.norma( a__ , a__ , a__ , hidden_dtype=hidden_states.dtype ) else: __snake_case = self.norma(a__ ) __snake_case = cross_attention_kwargs if cross_attention_kwargs is not None else {} __snake_case = self.attna( a__ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=a__ , **a__ , ) if self.use_ada_layer_norm_zero: __snake_case = gate_msa.unsqueeze(1 ) * attn_output __snake_case = attn_output + hidden_states # 2. Cross-Attention if self.attna is not None: __snake_case = ( self.norma(a__ , a__ ) if self.use_ada_layer_norm else self.norma(a__ ) ) __snake_case = self.attna( a__ , encoder_hidden_states=a__ , attention_mask=a__ , **a__ , ) __snake_case = attn_output + hidden_states # 3. Feed-forward __snake_case = self.norma(a__ ) if self.use_ada_layer_norm_zero: __snake_case = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self._chunk_size is not None: # "feed_forward_chunk_size" can be used to save memory if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0: raise ValueError( f"""`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.""" ) __snake_case = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size __snake_case = torch.cat( [self.ff(a__ ) for hid_slice in norm_hidden_states.chunk(a__ , dim=self._chunk_dim )] , dim=self._chunk_dim , ) else: __snake_case = self.ff(a__ ) if self.use_ada_layer_norm_zero: __snake_case = gate_mlp.unsqueeze(1 ) * ff_output __snake_case = ff_output + hidden_states return hidden_states class SCREAMING_SNAKE_CASE__ ( nn.Module ): def __init__(self : Any , a__ : int , a__ : Optional[int] = None , a__ : int = 4 , a__ : float = 0.0 , a__ : str = "geglu" , a__ : bool = False , ): """simple docstring""" super().__init__() __snake_case = int(dim * mult ) __snake_case = dim_out if dim_out is not None else dim if activation_fn == "gelu": __snake_case = GELU(a__ , a__ ) if activation_fn == "gelu-approximate": __snake_case = GELU(a__ , a__ , approximate='''tanh''' ) elif activation_fn == "geglu": __snake_case = GEGLU(a__ , a__ ) elif activation_fn == "geglu-approximate": __snake_case = ApproximateGELU(a__ , a__ ) __snake_case = nn.ModuleList([] ) # project in self.net.append(a__ ) # project dropout self.net.append(nn.Dropout(a__ ) ) # project out self.net.append(nn.Linear(a__ , a__ ) ) # FF as used in Vision Transformer, MLP-Mixer, etc. 
have a final dropout if final_dropout: self.net.append(nn.Dropout(a__ ) ) def a (self : str , a__ : int ): """simple docstring""" for module in self.net: __snake_case = module(a__ ) return hidden_states class SCREAMING_SNAKE_CASE__ ( nn.Module ): def __init__(self : int , a__ : int , a__ : int , a__ : str = "none" ): """simple docstring""" super().__init__() __snake_case = nn.Linear(a__ , a__ ) __snake_case = approximate def a (self : Optional[int] , a__ : Tuple ): """simple docstring""" if gate.device.type != "mps": return F.gelu(a__ , approximate=self.approximate ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype ) def a (self : Any , a__ : Optional[Any] ): """simple docstring""" __snake_case = self.proj(a__ ) __snake_case = self.gelu(a__ ) return hidden_states class SCREAMING_SNAKE_CASE__ ( nn.Module ): def __init__(self : Dict , a__ : int , a__ : int ): """simple docstring""" super().__init__() __snake_case = nn.Linear(a__ , dim_out * 2 ) def a (self : Any , a__ : List[Any] ): """simple docstring""" if gate.device.type != "mps": return F.gelu(a__ ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype ) def a (self : str , a__ : Dict ): """simple docstring""" __snake_case , __snake_case = self.proj(a__ ).chunk(2 , dim=-1 ) return hidden_states * self.gelu(a__ ) class SCREAMING_SNAKE_CASE__ ( nn.Module ): def __init__(self : str , a__ : int , a__ : int ): """simple docstring""" super().__init__() __snake_case = nn.Linear(a__ , a__ ) def a (self : Tuple , a__ : Tuple ): """simple docstring""" __snake_case = self.proj(a__ ) return x * torch.sigmoid(1.7_0_2 * x ) class SCREAMING_SNAKE_CASE__ ( nn.Module ): def __init__(self : Tuple , a__ : Tuple , a__ : Optional[int] ): """simple docstring""" super().__init__() __snake_case = nn.Embedding(a__ , a__ ) __snake_case = nn.SiLU() __snake_case = nn.Linear(a__ , embedding_dim * 2 ) __snake_case = nn.LayerNorm(a__ , elementwise_affine=a__ ) def a (self : Optional[Any] , a__ : Tuple , a__ : Union[str, Any] ): """simple docstring""" __snake_case = self.linear(self.silu(self.emb(a__ ) ) ) __snake_case , __snake_case = torch.chunk(a__ , 2 ) __snake_case = self.norm(a__ ) * (1 + scale) + shift return x class SCREAMING_SNAKE_CASE__ ( nn.Module ): def __init__(self : List[Any] , a__ : Tuple , a__ : List[str] ): """simple docstring""" super().__init__() __snake_case = CombinedTimestepLabelEmbeddings(a__ , a__ ) __snake_case = nn.SiLU() __snake_case = nn.Linear(a__ , 6 * embedding_dim , bias=a__ ) __snake_case = nn.LayerNorm(a__ , elementwise_affine=a__ , eps=1E-6 ) def a (self : Optional[Any] , a__ : Union[str, Any] , a__ : List[str] , a__ : Dict , a__ : List[str]=None ): """simple docstring""" __snake_case = self.linear(self.silu(self.emb(a__ , a__ , hidden_dtype=a__ ) ) ) __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = emb.chunk(6 , dim=1 ) __snake_case = self.norm(a__ ) * (1 + scale_msa[:, None]) + shift_msa[:, None] return x, gate_msa, shift_mlp, scale_mlp, gate_mlp class SCREAMING_SNAKE_CASE__ ( nn.Module ): def __init__(self : Optional[int] , a__ : int , a__ : int , a__ : int , a__ : Optional[str] = None , a__ : float = 1E-5 ): """simple docstring""" super().__init__() __snake_case = num_groups __snake_case = eps if act_fn is None: __snake_case = None else: __snake_case = get_activation(a__ ) __snake_case = nn.Linear(a__ , out_dim * 2 ) def a (self : List[str] , 
a__ : str , a__ : Dict ): """simple docstring""" if self.act: __snake_case = self.act(a__ ) __snake_case = self.linear(a__ ) __snake_case = emb[:, :, None, None] __snake_case , __snake_case = emb.chunk(2 , dim=1 ) __snake_case = F.group_norm(a__ , self.num_groups , eps=self.eps ) __snake_case = x * (1 + scale) + shift return x
354
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
238
0
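The `_chunk_size` branch in the transformer-block record above trades one large feed-forward activation for several small ones. A self-contained sketch of the same chunked feed-forward trick follows; layer sizes are invented for illustration. The result is identical because the MLP acts position-wise, so chunking along the sequence dimension does not change the math, only the peak memory.

import torch
import torch.nn as nn

ff = nn.Sequential(nn.Linear(64, 256), nn.GELU(), nn.Linear(256, 64))
hidden_states = torch.randn(2, 128, 64)

chunk_size, chunk_dim = 32, 1
num_chunks = hidden_states.shape[chunk_dim] // chunk_size
# Run the MLP chunk by chunk and concatenate the outputs back together.
chunked = torch.cat(
    [ff(chunk) for chunk in hidden_states.chunk(num_chunks, dim=chunk_dim)],
    dim=chunk_dim,
)
assert torch.allclose(chunked, ff(hidden_states), atol=1e-6)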
'''simple docstring''' import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) a : Optional[int] = logging.getLogger(__name__) a : Optional[Any] = 'Hello world! cécé herlolip' a : List[str] = namedtuple( 'BertAbsConfig', [ 'temp_dir', 'large', 'use_bert_emb', 'finetune_bert', 'encoder', 'share_emb', 'max_pos', 'enc_layers', 'enc_hidden_size', 'enc_heads', 'enc_ff_size', 'enc_dropout', 'dec_layers', 'dec_hidden_size', 'dec_heads', 'dec_ff_size', 'dec_dropout', ], ) def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' snake_case_ = BertAbsConfig( temp_dir='''.''', finetune_bert=__UpperCAmelCase, large=__UpperCAmelCase, share_emb=__UpperCAmelCase, use_bert_emb=__UpperCAmelCase, encoder='''bert''', max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2048, dec_dropout=0.2, ) snake_case_ = torch.load(__UpperCAmelCase, lambda __UpperCAmelCase, __UpperCAmelCase : storage ) snake_case_ = AbsSummarizer(__UpperCAmelCase, torch.device('''cpu''' ), __UpperCAmelCase ) original.eval() snake_case_ = BertAbsSummarizer(__UpperCAmelCase, torch.device('''cpu''' ) ) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info('''convert the model''' ) new_model.bert.load_state_dict(original.bert.state_dict() ) new_model.decoder.load_state_dict(original.decoder.state_dict() ) new_model.generator.load_state_dict(original.generator.state_dict() ) # ---------------------------------- # Make sure the outpus are identical # ---------------------------------- logging.info('''Make sure that the models\' outputs are identical''' ) snake_case_ = BertTokenizer.from_pretrained('''bert-base-uncased''' ) # prepare the model inputs snake_case_ = tokenizer.encode('''This is sample éàalj\'-.''' ) encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(__UpperCAmelCase )) ) snake_case_ = torch.tensor(__UpperCAmelCase ).unsqueeze(0 ) snake_case_ = tokenizer.encode('''This is sample 3 éàalj\'-.''' ) decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(__UpperCAmelCase )) ) snake_case_ = torch.tensor(__UpperCAmelCase ).unsqueeze(0 ) # failsafe to make sure the weights reset does not affect the # loaded weights. assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0 # forward pass snake_case_ = encoder_input_ids snake_case_ = decoder_input_ids snake_case_ = snake_case_ = None snake_case_ = None snake_case_ = snake_case_ = None snake_case_ = snake_case_ = None snake_case_ = None # The original model does not apply the geneator layer immediatly but rather in # the beam search (where it combines softmax + linear layer). Since we already # apply the softmax in our generation process we only apply the linear layer here. 
# We make sure that the outputs of the full stack are identical snake_case_ = original(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )[0] snake_case_ = original.generator(__UpperCAmelCase ) snake_case_ = new_model( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )[0] snake_case_ = new_model.generator(__UpperCAmelCase ) snake_case_ = torch.max(torch.abs(output_converted_model - output_original_model ) ).item() print('''Maximum absolute difference beween weights: {:.2f}'''.format(__UpperCAmelCase ) ) snake_case_ = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item() print('''Maximum absolute difference beween weights: {:.2f}'''.format(__UpperCAmelCase ) ) snake_case_ = torch.allclose(__UpperCAmelCase, __UpperCAmelCase, atol=1e-3 ) if are_identical: logging.info('''all weights are equal up to 1e-3''' ) else: raise ValueError('''the weights are different. The new model is likely different from the original one.''' ) # The model has been saved with torch.save(model) and this is bound to the exact # directory structure. We save the state_dict instead. logging.info('''saving the model\'s state dictionary''' ) torch.save( new_model.state_dict(), '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' ) if __name__ == "__main__": a : Union[str, Any] = argparse.ArgumentParser() parser.add_argument( '--bertabs_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.', ) a : Dict = parser.parse_args() convert_bertabs_checkpoints( args.bertabs_checkpoint_path, args.pytorch_dump_folder_path, )
56
'''simple docstring''' import inspect import os import re from transformers.configuration_utils import PretrainedConfig from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py A_ : List[str] = """src/transformers""" # This is to make sure the transformers module imported is the one in the repo. A_ : Optional[Any] = direct_transformers_import(PATH_TO_TRANSFORMERS) A_ : Union[str, Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING A_ : Dict = { # used to compute the property `self.chunk_length` """EncodecConfig""": ["""overlap"""], # used as `self.bert_model = BertModel(config, ...)` """DPRConfig""": True, # not used in modeling files, but it's an important information """FSMTConfig""": ["""langs"""], # used internally in the configuration class file """GPTNeoConfig""": ["""attention_types"""], # used internally in the configuration class file """EsmConfig""": ["""is_folding_model"""], # used during training (despite we don't have training script for these models yet) """Mask2FormerConfig""": ["""ignore_value"""], # `ignore_value` used during training (despite we don't have training script for these models yet) # `norm` used in conversion script (despite not using in the modeling file) """OneFormerConfig""": ["""ignore_value""", """norm"""], # used during preprocessing and collation, see `collating_graphormer.py` """GraphormerConfig""": ["""spatial_pos_max"""], # used internally in the configuration class file """T5Config""": ["""feed_forward_proj"""], # used internally in the configuration class file # `tokenizer_class` get default value `T5Tokenizer` intentionally """MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""], """UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""], # used internally in the configuration class file """LongT5Config""": ["""feed_forward_proj"""], # used internally in the configuration class file """SwitchTransformersConfig""": ["""feed_forward_proj"""], # having default values other than `1e-5` - we can't fix them without breaking """BioGptConfig""": ["""layer_norm_eps"""], # having default values other than `1e-5` - we can't fix them without breaking """GLPNConfig""": ["""layer_norm_eps"""], # having default values other than `1e-5` - we can't fix them without breaking """SegformerConfig""": ["""layer_norm_eps"""], # having default values other than `1e-5` - we can't fix them without breaking """CvtConfig""": ["""layer_norm_eps"""], # having default values other than `1e-5` - we can't fix them without breaking """PerceiverConfig""": ["""layer_norm_eps"""], # used internally to calculate the feature size """InformerConfig""": ["""num_static_real_features""", """num_time_features"""], # used internally to calculate the feature size """TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""], # used internally to calculate the feature size """AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""], # used internally to calculate `mlp_dim` """SamVisionConfig""": ["""mlp_ratio"""], # For (head) training, but so far not implemented """ClapAudioConfig""": ["""num_classes"""], # Not used, but providing useful information to users """SpeechT5HifiGanConfig""": ["""sampling_rate"""], } # TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure SPECIAL_CASES_TO_ALLOW.update( { 
"""CLIPSegConfig""": True, """DeformableDetrConfig""": True, """DetaConfig""": True, """DinatConfig""": True, """DonutSwinConfig""": True, """EfficientFormerConfig""": True, """FSMTConfig""": True, """JukeboxConfig""": True, """LayoutLMv2Config""": True, """MaskFormerSwinConfig""": True, """MT5Config""": True, """NatConfig""": True, """OneFormerConfig""": True, """PerceiverConfig""": True, """RagConfig""": True, """SpeechT5Config""": True, """SwinConfig""": True, """Swin2SRConfig""": True, """Swinv2Config""": True, """SwitchTransformersConfig""": True, """TableTransformerConfig""": True, """TapasConfig""": True, """TransfoXLConfig""": True, """UniSpeechConfig""": True, """UniSpeechSatConfig""": True, """WavLMConfig""": True, """WhisperConfig""": True, # TODO: @Arthur (for `alignment_head` and `alignment_layer`) """JukeboxPriorConfig""": True, # TODO: @Younes (for `is_decoder`) """Pix2StructTextConfig""": True, } ) def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Dict: '''simple docstring''' _UpperCAmelCase : Optional[Any] = False for attribute in attributes: for modeling_source in source_strings: # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)` if ( F'''config.{attribute}''' in modeling_source or F'''getattr(config, "{attribute}"''' in modeling_source or F'''getattr(self.config, "{attribute}"''' in modeling_source ): _UpperCAmelCase : Tuple = True # Deal with multi-line cases elif ( re.search( RF'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"''' , lowerCAmelCase_ , ) is not None ): _UpperCAmelCase : Any = True # `SequenceSummary` is called with `SequenceSummary(config)` elif attribute in [ "summary_type", "summary_use_proj", "summary_activation", "summary_last_dropout", "summary_proj_to_labels", "summary_first_dropout", ]: if "SequenceSummary" in modeling_source: _UpperCAmelCase : List[str] = True if attribute_used: break if attribute_used: break # common and important attributes, even if they do not always appear in the modeling files _UpperCAmelCase : Dict = [ """bos_index""", """eos_index""", """pad_index""", """unk_index""", """mask_index""", """image_size""", """use_cache""", """out_features""", """out_indices""", ] _UpperCAmelCase : int = ["""encoder_no_repeat_ngram_size"""] # Special cases to be allowed _UpperCAmelCase : Optional[Any] = True if not attribute_used: _UpperCAmelCase : List[Any] = False for attribute in attributes: # Allow if the default value in the configuration class is different from the one in `PretrainedConfig` if attribute in ["is_encoder_decoder"] and default_value is True: _UpperCAmelCase : Tuple = True elif attribute in ["tie_word_embeddings"] and default_value is False: _UpperCAmelCase : Any = True # Allow cases without checking the default value in the configuration class elif attribute in attributes_to_allow + attributes_used_in_generation: _UpperCAmelCase : Dict = True elif attribute.endswith("""_token_id""" ): _UpperCAmelCase : Optional[int] = True # configuration class specific cases if not case_allowed: _UpperCAmelCase : int = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] ) _UpperCAmelCase : Union[str, Any] = allowed_cases is True or attribute in allowed_cases return attribute_used or case_allowed def snake_case_ ( lowerCAmelCase_ )-> Optional[Any]: '''simple docstring''' _UpperCAmelCase : Optional[int] = dict(inspect.signature(config_class.__init__ ).parameters ) _UpperCAmelCase : Optional[int] = [x for x in 
list(signature.keys() ) if x not in ["""self""", """kwargs"""]] _UpperCAmelCase : Optional[int] = [signature[param].default for param in parameter_names] # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long # as one variant is used, the test should pass _UpperCAmelCase : List[Any] = {} if len(config_class.attribute_map ) > 0: _UpperCAmelCase : Optional[int] = {v: k for k, v in config_class.attribute_map.items()} # Get the path to modeling source files _UpperCAmelCase : int = inspect.getsourcefile(lowerCAmelCase_ ) _UpperCAmelCase : str = os.path.dirname(lowerCAmelCase_ ) # Let's check against all frameworks: as long as one framework uses an attribute, we are good. _UpperCAmelCase : Optional[int] = [os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) for fn in os.listdir(lowerCAmelCase_ ) if fn.startswith("""modeling_""" )] # Get the source code strings _UpperCAmelCase : str = [] for path in modeling_paths: if os.path.isfile(lowerCAmelCase_ ): with open(lowerCAmelCase_ ) as fp: modeling_sources.append(fp.read() ) _UpperCAmelCase : Any = [] for config_param, default_value in zip(lowerCAmelCase_ , lowerCAmelCase_ ): # `attributes` here is all the variant names for `config_param` _UpperCAmelCase : List[str] = [config_param] # some configuration classes have non-empty `attribute_map`, and both names could be used in the # corresponding modeling files. As long as one of them appears, it is fine. if config_param in reversed_attribute_map: attributes.append(reversed_attribute_map[config_param] ) if not check_attribute_being_used(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): unused_attributes.append(attributes[0] ) return sorted(lowerCAmelCase_ ) def snake_case_ ( )-> Optional[Any]: '''simple docstring''' _UpperCAmelCase : Dict = {} for _config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in _config_class.__module__: continue # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.) _UpperCAmelCase : List[Any] = [ cls for name, cls in inspect.getmembers( inspect.getmodule(_config_class ) , lambda lowerCAmelCase_ : inspect.isclass(lowerCAmelCase_ ) and issubclass(lowerCAmelCase_ , lowerCAmelCase_ ) and inspect.getmodule(lowerCAmelCase_ ) == inspect.getmodule(_config_class ) , ) ] for config_class in config_classes_in_module: _UpperCAmelCase : Optional[int] = check_config_attributes_being_used(lowerCAmelCase_ ) if len(lowerCAmelCase_ ) > 0: _UpperCAmelCase : Tuple = unused_attributes if len(lowerCAmelCase_ ) > 0: _UpperCAmelCase : Dict = """The following configuration classes contain unused attributes in the corresponding modeling files:\n""" for name, attributes in configs_with_unused_attributes.items(): error += F'''{name}: {attributes}\n''' raise ValueError(lowerCAmelCase_ ) if __name__ == "__main__": check_config_attributes()
215
0
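The attribute check in the record above starts by reading a config class's `__init__` parameters with `inspect.signature`. A small sketch of that introspection step on its own, using `BertConfig` as an arbitrary example:

import inspect

from transformers import BertConfig

# Collect the __init__ parameters and their defaults, skipping `self` and `**kwargs`.
signature = dict(inspect.signature(BertConfig.__init__).parameters)
parameter_names = [name for name in signature if name not in ("self", "kwargs")]
defaults = {name: signature[name].default for name in parameter_names}
print(defaults["hidden_size"])  # 768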
import argparse

from torch import nn

# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
    ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
    XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)

from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging


logger = logging.get_logger(__name__)
logging.set_verbosity_info()


def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                # these comparisons were previously bare expressions (no-ops); they must be asserted
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
305
from __future__ import annotations

END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True  # mark the end of a complete word

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
305
1
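The conversion record above slices fused `in_proj_weight`/`in_proj_bias` tensors into separate q/k/v projections. A minimal standalone sketch of that split, with dimensions invented for illustration:

import torch

# torch's fused attention modules stack the three projections along dim 0,
# so converting to separate q/k/v layers means slicing thirds.
embed_dim = 8
in_proj_weight = torch.randn(3 * embed_dim, embed_dim)

q_weight = in_proj_weight[:embed_dim, :]
k_weight = in_proj_weight[embed_dim : 2 * embed_dim, :]
v_weight = in_proj_weight[2 * embed_dim :, :]
assert q_weight.shape == k_weight.shape == v_weight.shape == (embed_dim, embed_dim)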
"""simple docstring""" import itertools import random import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin lowerCamelCase__ : List[str] = random.Random() if is_torch_available(): import torch def UpperCamelCase ( _lowerCAmelCase : Tuple, _lowerCAmelCase : List[Any]=1.0, _lowerCAmelCase : int=None, _lowerCAmelCase : Tuple=None ) -> List[str]: if rng is None: _UpperCAmelCase : Optional[int] = global_rng _UpperCAmelCase : Optional[int] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class _UpperCAmelCase ( unittest.TestCase): def __init__( self , _A , _A=7 , _A=4_00 , _A=20_00 , _A=1 , _A=0.0 , _A=1_60_00 , _A=True , _A=True , ) -> List[Any]: '''simple docstring''' _UpperCAmelCase : Any = parent _UpperCAmelCase : int = batch_size _UpperCAmelCase : Optional[int] = min_seq_length _UpperCAmelCase : str = max_seq_length _UpperCAmelCase : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) _UpperCAmelCase : Union[str, Any] = feature_size _UpperCAmelCase : Dict = padding_value _UpperCAmelCase : str = sampling_rate _UpperCAmelCase : Dict = return_attention_mask _UpperCAmelCase : Dict = do_normalize def __snake_case ( self ) -> Optional[int]: '''simple docstring''' return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def __snake_case ( self , _A=False , _A=False ) -> List[Any]: '''simple docstring''' def _flatten(_A ): return list(itertools.chain(*_A ) ) if equal_length: _UpperCAmelCase : Dict = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size _UpperCAmelCase : int = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: _UpperCAmelCase : List[Any] = [np.asarray(_A ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class _UpperCAmelCase ( __a , unittest.TestCase): __a : Union[str, Any] = ASTFeatureExtractor def __snake_case ( self ) -> int: '''simple docstring''' _UpperCAmelCase : Optional[int] = ASTFeatureExtractionTester(self ) def __snake_case ( self ) -> str: '''simple docstring''' _UpperCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 _UpperCAmelCase : Optional[Any] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] _UpperCAmelCase : str = [np.asarray(_A ) for speech_input in speech_inputs] # Test not batched input _UpperCAmelCase : List[str] = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values _UpperCAmelCase : List[str] = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) ) # Test batched _UpperCAmelCase : Optional[Any] = feat_extract(_A , padding=_A , return_tensors="""np""" ).input_values _UpperCAmelCase : Dict = feat_extract(_A , padding=_A , return_tensors="""np""" ).input_values for enc_seq_a, enc_seq_a in zip(_A , _A ): self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) ) # Test 2-D 
numpy arrays are batched. _UpperCAmelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] _UpperCAmelCase : int = np.asarray(_A ) _UpperCAmelCase : Any = feat_extract(_A , return_tensors="""np""" ).input_values _UpperCAmelCase : List[Any] = feat_extract(_A , return_tensors="""np""" ).input_values for enc_seq_a, enc_seq_a in zip(_A , _A ): self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) ) @require_torch def __snake_case ( self ) -> Union[str, Any]: '''simple docstring''' import torch _UpperCAmelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _UpperCAmelCase : str = np.random.rand(1_00 ).astype(np.floataa ) _UpperCAmelCase : List[Any] = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: _UpperCAmelCase : Tuple = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) _UpperCAmelCase : List[Any] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def __snake_case ( self , _A ) -> int: '''simple docstring''' from datasets import load_dataset _UpperCAmelCase : Union[str, Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" ) # automatic decoding with librispeech _UpperCAmelCase : str = ds.sort("""id""" ).select(range(_A ) )[:num_samples]["""audio"""] return [x["array"] for x in speech_samples] @require_torch def __snake_case ( self ) -> Optional[Any]: '''simple docstring''' _UpperCAmelCase : Optional[int] = torch.tensor( [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776, -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133, -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936, -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] ) # fmt: on _UpperCAmelCase : Union[str, Any] = self._load_datasamples(1 ) _UpperCAmelCase : int = ASTFeatureExtractor() _UpperCAmelCase : Any = feature_extractor(_A , return_tensors="""pt""" ).input_values self.assertEquals(input_values.shape , (1, 10_24, 1_28) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , _A , atol=1e-4 ) )
246
"""simple docstring""" import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase__ : List[Any] = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) lowerCamelCase__ : int = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''', F'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''', F'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) 
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''')) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ('''input_proj.weight''', '''input_projection.weight'''), ('''input_proj.bias''', '''input_projection.bias'''), ('''query_embed.weight''', '''query_position_embeddings.weight'''), 
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''), ('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''), ('''class_embed.weight''', '''class_labels_classifier.weight'''), ('''class_embed.bias''', '''class_labels_classifier.bias'''), ('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''), ('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''), ('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''), ('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''), ('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''), ('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''), ('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''), ('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''), ('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''), ('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''), ('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''), ('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''), ('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''), ('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''), ('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''), ('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''), ] ) def UpperCamelCase ( _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : int, _lowerCAmelCase : Optional[int] ) -> Dict: _UpperCAmelCase : Union[str, Any] = state_dict.pop(_lowerCAmelCase ) _UpperCAmelCase : Union[str, Any] = val def UpperCamelCase ( _lowerCAmelCase : List[Any] ) -> List[str]: _UpperCAmelCase : Union[str, Any] = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: _UpperCAmelCase : Tuple = key.replace("""backbone.0.body""", """backbone.conv_encoder.model""" ) _UpperCAmelCase : Any = value else: _UpperCAmelCase : List[Any] = value return new_state_dict def UpperCamelCase ( _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : Tuple=False ) -> Optional[Any]: _UpperCAmelCase : int = """""" if is_panoptic: _UpperCAmelCase : str = """conditional_detr.""" # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) _UpperCAmelCase : Dict = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' ) _UpperCAmelCase : Union[str, Any] = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict _UpperCAmelCase : Any = in_proj_weight[:256, :] _UpperCAmelCase : Tuple = in_proj_bias[:256] _UpperCAmelCase : Optional[int] = in_proj_weight[256:512, :] _UpperCAmelCase : str = in_proj_bias[256:512] _UpperCAmelCase : int = in_proj_weight[-256:, :] _UpperCAmelCase : List[Any] = in_proj_bias[-256:] def UpperCamelCase ( ) -> Any: _UpperCAmelCase : str = """http://images.cocodataset.org/val2017/000000039769.jpg""" _UpperCAmelCase : Dict = Image.open(requests.get(_lowerCAmelCase, stream=_lowerCAmelCase ).raw ) return im @torch.no_grad() def UpperCamelCase ( 
_lowerCAmelCase : Tuple, _lowerCAmelCase : Any ) -> List[Any]: _UpperCAmelCase : str = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: _UpperCAmelCase : Dict = """resnet101""" if "dc5" in model_name: _UpperCAmelCase : Union[str, Any] = True _UpperCAmelCase : Optional[Any] = """panoptic""" in model_name if is_panoptic: _UpperCAmelCase : Optional[int] = 250 else: _UpperCAmelCase : str = 91 _UpperCAmelCase : Optional[int] = """huggingface/label-files""" _UpperCAmelCase : str = """coco-detection-id2label.json""" _UpperCAmelCase : Any = json.load(open(hf_hub_download(_lowerCAmelCase, _lowerCAmelCase, repo_type="""dataset""" ), """r""" ) ) _UpperCAmelCase : Union[str, Any] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()} _UpperCAmelCase : List[str] = idalabel _UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()} # load image processor _UpperCAmelCase : Optional[int] = """coco_panoptic""" if is_panoptic else """coco_detection""" _UpperCAmelCase : int = ConditionalDetrImageProcessor(format=_lowerCAmelCase ) # prepare image _UpperCAmelCase : List[str] = prepare_img() _UpperCAmelCase : Any = image_processor(images=_lowerCAmelCase, return_tensors="""pt""" ) _UpperCAmelCase : Any = encoding["""pixel_values"""] logger.info(f'''Converting model {model_name}...''' ) # load original model from torch hub _UpperCAmelCase : Tuple = torch.hub.load("""DeppMeng/ConditionalDETR""", _lowerCAmelCase, pretrained=_lowerCAmelCase ).eval() _UpperCAmelCase : Tuple = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: _UpperCAmelCase : Optional[int] = """conditional_detr.""" + src rename_key(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) _UpperCAmelCase : Optional[Any] = rename_backbone_keys(_lowerCAmelCase ) # query, key and value matrices need special treatment read_in_q_k_v(_lowerCAmelCase, is_panoptic=_lowerCAmelCase ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them _UpperCAmelCase : List[str] = """conditional_detr.model.""" if is_panoptic else """model.""" for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("""conditional_detr""" ) and not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ) ): _UpperCAmelCase : Tuple = state_dict.pop(_lowerCAmelCase ) _UpperCAmelCase : Any = val elif "class_labels_classifier" in key or "bbox_predictor" in key: _UpperCAmelCase : Optional[Any] = state_dict.pop(_lowerCAmelCase ) _UpperCAmelCase : Union[str, Any] = val elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ): continue else: _UpperCAmelCase : Tuple = state_dict.pop(_lowerCAmelCase ) _UpperCAmelCase : Optional[int] = val else: if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ): _UpperCAmelCase : Tuple = state_dict.pop(_lowerCAmelCase ) _UpperCAmelCase : Any = val # finally, create HuggingFace model and load state dict _UpperCAmelCase : Union[str, Any] = ConditionalDetrForSegmentation(_lowerCAmelCase ) if is_panoptic else ConditionalDetrForObjectDetection(_lowerCAmelCase ) model.load_state_dict(_lowerCAmelCase ) model.eval() model.push_to_hub(repo_id=_lowerCAmelCase, organization="""DepuMeng""", commit_message="""Add model""" ) # verify our conversion _UpperCAmelCase : Any = conditional_detr(_lowerCAmelCase ) _UpperCAmelCase : int = model(_lowerCAmelCase ) assert torch.allclose(outputs.logits, 
original_outputs["""pred_logits"""], atol=1E-4 ) assert torch.allclose(outputs.pred_boxes, original_outputs["""pred_boxes"""], atol=1E-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks, original_outputs["""pred_masks"""], atol=1E-4 ) # Save model and image processor logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase ) model.save_pretrained(_lowerCAmelCase ) image_processor.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": lowerCamelCase__ : List[str] = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''conditional_detr_resnet50''', type=str, help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) lowerCamelCase__ : int = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
246
1
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def lowerCamelCase_ ( self : List[str] ): """simple docstring""" UpperCamelCase = tempfile.mkdtemp() UpperCamelCase = BlipImageProcessor() UpperCamelCase = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" ) UpperCamelCase = BlipProcessor(lowerCamelCase_ , lowerCamelCase_ ) processor.save_pretrained(self.tmpdirname ) def lowerCamelCase_ ( self : Any , **lowerCamelCase_ : Any ): """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase_ ).tokenizer def lowerCamelCase_ ( self : Tuple , **lowerCamelCase_ : Optional[Any] ): """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase_ ).image_processor def lowerCamelCase_ ( self : str ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" UpperCamelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] UpperCamelCase = [Image.fromarray(np.moveaxis(lowerCamelCase_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowerCamelCase_ ( self : int ): """simple docstring""" UpperCamelCase = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) UpperCamelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) UpperCamelCase = self.get_image_processor(do_normalize=lowerCamelCase_ , padding_value=1.0 ) UpperCamelCase = BlipProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowerCamelCase_ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , lowerCamelCase_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , lowerCamelCase_ ) def lowerCamelCase_ ( self : str ): """simple docstring""" UpperCamelCase = self.get_image_processor() UpperCamelCase = self.get_tokenizer() UpperCamelCase = BlipProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ ) UpperCamelCase = self.prepare_image_inputs() UpperCamelCase = image_processor(lowerCamelCase_ , return_tensors="""np""" ) UpperCamelCase = processor(images=lowerCamelCase_ , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase = self.get_image_processor() UpperCamelCase = self.get_tokenizer() UpperCamelCase = BlipProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ ) UpperCamelCase = """lower newer""" UpperCamelCase = processor(text=lowerCamelCase_ ) UpperCamelCase = tokenizer(lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowerCamelCase_ ( self : int ): """simple docstring""" UpperCamelCase = self.get_image_processor() UpperCamelCase = 
self.get_tokenizer() UpperCamelCase = BlipProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ ) UpperCamelCase = """lower newer""" UpperCamelCase = self.prepare_image_inputs() UpperCamelCase = processor(text=lowerCamelCase_ , images=lowerCamelCase_ ) self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] ) # test if it raises when no input is passed with pytest.raises(lowerCamelCase_ ): processor() def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase = self.get_image_processor() UpperCamelCase = self.get_tokenizer() UpperCamelCase = BlipProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ ) UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] UpperCamelCase = processor.batch_decode(lowerCamelCase_ ) UpperCamelCase = tokenizer.batch_decode(lowerCamelCase_ ) self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" UpperCamelCase = self.get_image_processor() UpperCamelCase = self.get_tokenizer() UpperCamelCase = BlipProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ ) UpperCamelCase = """lower newer""" UpperCamelCase = self.prepare_image_inputs() UpperCamelCase = processor(text=lowerCamelCase_ , images=lowerCamelCase_ ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
352
import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.txt"""} _SCREAMING_SNAKE_CASE = { """vocab_file""": { """openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""", }, } _SCREAMING_SNAKE_CASE = { """openbmb/cpm-ant-10b""": 1_0_2_4, } def lowercase( UpperCamelCase_ ) -> Tuple: '''simple docstring''' UpperCamelCase = collections.OrderedDict() with open(UpperCamelCase_ , """r""" , encoding="""utf-8""" ) as reader: UpperCamelCase = reader.readlines() for index, token in enumerate(UpperCamelCase_ ): UpperCamelCase = token.rstrip("""\n""" ) UpperCamelCase = index return vocab class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): def __init__( self : Dict , lowerCamelCase_ : str , lowerCamelCase_ : Union[str, Any]="<unk>" , lowerCamelCase_ : Any=200 ): """simple docstring""" UpperCamelCase = vocab UpperCamelCase = unk_token UpperCamelCase = max_input_chars_per_word def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Union[str, Any] ): """simple docstring""" UpperCamelCase = list(lowerCamelCase_ ) if len(lowerCamelCase_ ) > self.max_input_chars_per_word: return [self.unk_token] UpperCamelCase = 0 UpperCamelCase = [] while start < len(lowerCamelCase_ ): UpperCamelCase = len(lowerCamelCase_ ) UpperCamelCase = None while start < end: UpperCamelCase = """""".join(chars[start:end] ) if substr in self.vocab: UpperCamelCase = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(lowerCamelCase_ ) UpperCamelCase = end return sub_tokens class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): __lowerCAmelCase = VOCAB_FILES_NAMES __lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase = ["""input_ids""", """attention_mask"""] __lowerCAmelCase = False def __init__( self : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[Any]="<d>" , lowerCamelCase_ : List[Any]="</d>" , lowerCamelCase_ : Optional[Any]="<s>" , lowerCamelCase_ : List[str]="</s>" , lowerCamelCase_ : int="<pad>" , lowerCamelCase_ : List[Any]="<unk>" , lowerCamelCase_ : Optional[Any]="</n>" , lowerCamelCase_ : Tuple="</_>" , lowerCamelCase_ : Any="left" , **lowerCamelCase_ : str , ): """simple docstring""" requires_backends(self , ["""jieba"""] ) super().__init__( bod_token=lowerCamelCase_ , eod_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , line_token=lowerCamelCase_ , space_token=lowerCamelCase_ , padding_side=lowerCamelCase_ , **lowerCamelCase_ , ) UpperCamelCase = bod_token UpperCamelCase = eod_token UpperCamelCase = load_vocab(lowerCamelCase_ ) UpperCamelCase = self.encoder[space_token] UpperCamelCase = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase_ : x[1] ) ) UpperCamelCase = {v: k for k, v in self.encoder.items()} UpperCamelCase = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token ) @property def lowerCamelCase_ ( self : List[str] ): """simple docstring""" return self.encoder[self.bod_token] @property def lowerCamelCase_ ( self : 
Optional[Any] ): """simple docstring""" return self.encoder[self.eod_token] @property def lowerCamelCase_ ( self : List[str] ): """simple docstring""" return self.encoder["\n"] @property def lowerCamelCase_ ( self : Dict ): """simple docstring""" return len(self.encoder ) def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def lowerCamelCase_ ( self : int , lowerCamelCase_ : Any ): """simple docstring""" UpperCamelCase = [] for x in jieba.cut(lowerCamelCase_ , cut_all=lowerCamelCase_ ): output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowerCamelCase_ ) ) return output_tokens def lowerCamelCase_ ( self : str , lowerCamelCase_ : Tuple , **lowerCamelCase_ : Tuple ): """simple docstring""" UpperCamelCase = [i for i in token_ids if i >= 0] UpperCamelCase = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(lowerCamelCase_ , **lowerCamelCase_ ) def lowerCamelCase_ ( self : int , lowerCamelCase_ : int ): """simple docstring""" return token in self.encoder def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : List[str] ): """simple docstring""" return "".join(lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : List[Any] ): """simple docstring""" return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token ) ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Tuple ): """simple docstring""" return self.decoder.get(lowerCamelCase_ , self.unk_token ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ): """simple docstring""" if os.path.isdir(lowerCamelCase_ ): UpperCamelCase = os.path.join( lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) else: UpperCamelCase = (filename_prefix + """-""" if filename_prefix else """""") + save_directory UpperCamelCase = 0 if " " in self.encoder: UpperCamelCase = self.encoder[""" """] del self.encoder[" "] if "\n" in self.encoder: UpperCamelCase = self.encoder["""\n"""] del self.encoder["\n"] UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase_ : x[1] ) ) with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" """ Please check that the vocabulary is not corrupted!""" ) UpperCamelCase = token_index writer.write(token + """\n""" ) index += 1 return (vocab_file,) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : List[int] , lowerCamelCase_ : List[int] = None ): """simple docstring""" if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None , lowerCamelCase_ : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ ) if token_ids_a is not None: return [1] + ([0] * len(lowerCamelCase_ )) + [1] + ([0] * len(lowerCamelCase_ )) return [1] + ([0] * len(lowerCamelCase_ ))
165
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE_: str ={ 'configuration_xlm_roberta': [ 'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMRobertaConfig', 'XLMRobertaOnnxConfig', ], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_: List[str] =['XLMRobertaTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_: int =['XLMRobertaTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_: Tuple =[ 'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'XLMRobertaForCausalLM', 'XLMRobertaForMaskedLM', 'XLMRobertaForMultipleChoice', 'XLMRobertaForQuestionAnswering', 'XLMRobertaForSequenceClassification', 'XLMRobertaForTokenClassification', 'XLMRobertaModel', 'XLMRobertaPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_: List[Any] =[ 'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFXLMRobertaForCausalLM', 'TFXLMRobertaForMaskedLM', 'TFXLMRobertaForMultipleChoice', 'TFXLMRobertaForQuestionAnswering', 'TFXLMRobertaForSequenceClassification', 'TFXLMRobertaForTokenClassification', 'TFXLMRobertaModel', 'TFXLMRobertaPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_: List[Any] =[ 'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'FlaxXLMRobertaForMaskedLM', 'FlaxXLMRobertaForCausalLM', 'FlaxXLMRobertaForMultipleChoice', 'FlaxXLMRobertaForQuestionAnswering', 'FlaxXLMRobertaForSequenceClassification', 'FlaxXLMRobertaForTokenClassification', 'FlaxXLMRobertaModel', 'FlaxXLMRobertaPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig, XLMRobertaOnnxConfig, ) try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta import XLMRobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, XLMRobertaForQuestionAnswering, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, XLMRobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm_roberta import ( TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMRobertaForCausalLM, TFXLMRobertaForMaskedLM, TFXLMRobertaForMultipleChoice, TFXLMRobertaForQuestionAnswering, TFXLMRobertaForSequenceClassification, TFXLMRobertaForTokenClassification, TFXLMRobertaModel, TFXLMRobertaPreTrainedModel, ) try: if not 
is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xlm_roberta import ( FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxXLMRobertaForCausalLM, FlaxXLMRobertaForMaskedLM, FlaxXLMRobertaForMultipleChoice, FlaxXLMRobertaForQuestionAnswering, FlaxXLMRobertaForSequenceClassification, FlaxXLMRobertaForTokenClassification, FlaxXLMRobertaModel, FlaxXLMRobertaPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE_: Optional[Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
1
"""Print a star diamond (pyramid) of a user-chosen size."""


# Function to print upper half of diamond (pyramid)
def floyd(n: int) -> None:
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n: int) -> None:
    """Print the lower half of the diamond."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n: int) -> None:
    """Print the full diamond, or a short message for non-positive n."""
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- |  |-  |--| |\  /| |-")
    print(r"|/  \| |-  |_  |_  |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
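# Illustrative usage, worked out by hand (not part of a test suite): the
# widest row appears twice, once from floyd() and once from reverse_floyd().
#
#     >>> pretty_print(3)
#       *
#      * *
#     * * *
#     * * *
#      * *
#       *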
35
0
def get_set_bits_count(number: int) -> int:
    """Count the set bits (1s) in a non-negative integer using
    Brian Kernighan's algorithm."""
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # This way we arrive at the next set bit (next 1) instead of looping
        # through each bit and checking for 1s; hence the loop won't run 32
        # times, it only runs once per set bit.
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
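# A few hand-checked examples (25 == 0b11001 and 7 == 0b111 each contain
# three 1-bits); these run as a quick sanity check when the script is
# executed directly:
if __name__ == "__main__":
    assert get_set_bits_count(25) == 3
    assert get_set_bits_count(7) == 3
    assert get_set_bits_count(0) == 0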
281
import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _snake_case ( _snake_case , unittest.TestCase ): SCREAMING_SNAKE_CASE__ = MgpstrTokenizer SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = {} SCREAMING_SNAKE_CASE__ = False def SCREAMING_SNAKE_CASE__ ( self ): super().setUp() # fmt: off a :int = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z'''] # fmt: on a :List[str] = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) ) a :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_lowerCamelCase ) + '''\n''' ) def SCREAMING_SNAKE_CASE__ ( self , **_lowerCamelCase ): return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): a :str = '''tester''' a :Union[str, Any] = '''tester''' return input_text, output_text @unittest.skip('''MGP-STR always lower cases letters.''' ) def SCREAMING_SNAKE_CASE__ ( self ): pass def SCREAMING_SNAKE_CASE__ ( self ): a :List[Any] = self.get_tokenizers(do_lower_case=_lowerCamelCase ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): a :Any = '''[SPECIAL_TOKEN]''' tokenizer.add_special_tokens({'''cls_token''': special_token} ) a :str = tokenizer.encode([special_token] , add_special_tokens=_lowerCamelCase ) self.assertEqual(len(_lowerCamelCase ) , 1 ) a :Tuple = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase ) self.assertTrue(special_token not in decoded ) def SCREAMING_SNAKE_CASE__ ( self ): a :Optional[Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): a , a :Tuple = self.get_input_output_texts(_lowerCamelCase ) a :Tuple = tokenizer.tokenize(_lowerCamelCase ) a :int = tokenizer.convert_tokens_to_ids(_lowerCamelCase ) a :Optional[int] = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) a :Any = tokenizer.convert_ids_to_tokens(_lowerCamelCase ) self.assertNotEqual(len(_lowerCamelCase ) , 0 ) a :str = tokenizer.decode(_lowerCamelCase ) self.assertIsInstance(_lowerCamelCase , _lowerCamelCase ) self.assertEqual(text_a.replace(''' ''' , '''''' ) , _lowerCamelCase ) @unittest.skip('''MGP-STR tokenizer only handles one sequence.''' ) def SCREAMING_SNAKE_CASE__ ( self ): pass @unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' ) def SCREAMING_SNAKE_CASE__ ( self ): pass
281
1
"""Brute-force decryption of a Caesar cipher: print the candidate plaintext
for every possible key."""

import string


def decrypt(message: str) -> None:
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
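# Hand-checked example: "KHOOR" is "HELLO" Caesar-shifted by 3, so among the
# 26 candidate lines that decrypt("KHOOR") prints, the one for key 3 reads
# "Decryption using Key #3: HELLO".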
89
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> int: if n == 1 or not isinstance(__UpperCAmelCase , __UpperCAmelCase ): return 0 elif n == 2: return 1 else: lowercase__: List[Any] = [0, 1] for i in range(2 , n + 1 ): sequence.append(sequence[i - 1] + sequence[i - 2] ) return sequence[n] def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> int: lowercase__: Union[str, Any] = 0 lowercase__: List[Any] = 2 while digits < n: index += 1 lowercase__: Dict = len(str(fibonacci(__UpperCAmelCase ) ) ) return index def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = 1_0_0_0 ) -> int: return fibonacci_digits_index(__UpperCAmelCase ) if __name__ == "__main__": print(solution(int(str(input()).strip())))
177
0
"""Utilities to verify dataset download checksums and split sizes.

Exception class names below are reconstructed from the names raised in the
function bodies; the two base-class names follow the same naming scheme.
"""

import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger

logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    pass


class UnexpectedDownloadedFile(ChecksumVerificationException):
    pass


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    pass


class NonMatchingChecksumError(ChecksumVerificationException):
    pass


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None) -> None:
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    pass


class UnexpectedSplits(SplitsVerificationException):
    pass


class ExpectedMoreSplits(SplitsVerificationException):
    pass


class NonMatchingSplitsSizesError(SplitsVerificationException):
    pass


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict) -> None:
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size) -> bool:
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    return False
357
'''simple docstring''' from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class _snake_case ( a__ ): lowerCAmelCase :Optional[int] = ['''image_processor''', '''tokenizer'''] lowerCAmelCase :Optional[int] = '''BridgeTowerImageProcessor''' lowerCAmelCase :List[str] = ('''RobertaTokenizer''', '''RobertaTokenizerFast''') def __init__( self , _lowerCamelCase , _lowerCamelCase): super().__init__(_lowerCamelCase , _lowerCamelCase) def __call__( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = True , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = 0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = True , _lowerCamelCase = None , **_lowerCamelCase , ): UpperCAmelCase__ : List[str] = self.tokenizer( text=_lowerCamelCase , add_special_tokens=_lowerCamelCase , padding=_lowerCamelCase , truncation=_lowerCamelCase , max_length=_lowerCamelCase , stride=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_token_type_ids=_lowerCamelCase , return_attention_mask=_lowerCamelCase , return_overflowing_tokens=_lowerCamelCase , return_special_tokens_mask=_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , return_length=_lowerCamelCase , verbose=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase , ) # add pixel_values + pixel_mask UpperCAmelCase__ : Optional[Any] = self.image_processor( _lowerCamelCase , return_tensors=_lowerCamelCase , do_normalize=_lowerCamelCase , do_center_crop=_lowerCamelCase , **_lowerCamelCase) encoding.update(_lowerCamelCase) return encoding def snake_case__ ( self , *_lowerCamelCase , **_lowerCamelCase): return self.tokenizer.batch_decode(*_lowerCamelCase , **_lowerCamelCase) def snake_case__ ( self , *_lowerCamelCase , **_lowerCamelCase): return self.tokenizer.decode(*_lowerCamelCase , **_lowerCamelCase) @property def snake_case__ ( self): UpperCAmelCase__ : Optional[int] = self.tokenizer.model_input_names UpperCAmelCase__ : str = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
283
0
from __future__ import annotations

import typing
from collections import Counter


def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count, for each perimeter up to max_perimeter, how many Pythagorean
    triples (base, perpendicular, hypotenuse) share that perimeter."""
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(n: int = 1000) -> int:
    """Project Euler 39: the perimeter <= n with the maximum number of solutions."""
    triplets = pythagorean_triple(n)
    return triplets.most_common(1)[0][0]


if __name__ == "__main__":
    print(f"Perimeter {solution()} has maximum solutions")
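# For the default limit, the published Project Euler 39 answer is 840: among
# all perimeters p <= 1000, p == 840 admits the most integer right triangles
# (quoted from the problem, not re-verified here), so solution() == 840.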
325
from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { """transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""", } class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : Union[str, Any] = "transfo-xl" lowerCAmelCase__ : int = ["mems"] lowerCAmelCase__ : Dict = { "n_token": "vocab_size", "hidden_size": "d_model", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : Optional[int] , _UpperCAmelCase : Tuple=26_77_35 , _UpperCAmelCase : Any=[2_00_00, 4_00_00, 20_00_00] , _UpperCAmelCase : Tuple=10_24 , _UpperCAmelCase : Union[str, Any]=10_24 , _UpperCAmelCase : Optional[int]=16 , _UpperCAmelCase : Tuple=64 , _UpperCAmelCase : Tuple=40_96 , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : str=False , _UpperCAmelCase : Optional[Any]=18 , _UpperCAmelCase : int=16_00 , _UpperCAmelCase : Optional[int]=10_00 , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Any=0 , _UpperCAmelCase : Optional[Any]=-1 , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : List[str]=0.0 , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : int="normal" , _UpperCAmelCase : int=0.01 , _UpperCAmelCase : List[Any]=0.01 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : Optional[Any]=1e-5 , _UpperCAmelCase : Tuple=0 , **_UpperCAmelCase : List[str] , ) -> Tuple: """simple docstring""" __lowercase = vocab_size __lowercase = [] self.cutoffs.extend(_UpperCAmelCase ) if proj_share_all_but_first: __lowercase = [False] + [True] * len(self.cutoffs ) else: __lowercase = [False] + [False] * len(self.cutoffs ) __lowercase = d_model __lowercase = d_embed __lowercase = d_head __lowercase = d_inner __lowercase = div_val __lowercase = pre_lnorm __lowercase = n_layer __lowercase = n_head __lowercase = mem_len __lowercase = same_length __lowercase = attn_type __lowercase = clamp_len __lowercase = sample_softmax __lowercase = adaptive __lowercase = dropout __lowercase = dropatt __lowercase = untie_r __lowercase = init __lowercase = init_range __lowercase = proj_init_std __lowercase = init_std __lowercase = layer_norm_epsilon super().__init__(eos_token_id=_UpperCAmelCase , **_UpperCAmelCase ) @property def a__ ( self : Tuple ) -> Any: """simple docstring""" logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" ) return -1 @max_position_embeddings.setter def a__ ( self : Dict , _UpperCAmelCase : List[str] ) -> Optional[Any]: """simple docstring""" raise NotImplementedError( f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
325
1
from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class __lowerCamelCase ( snake_case_ ): """simple docstring""" lowerCAmelCase__ = "" lowerCAmelCase__ = "hf-legacy" # "hf://"" is reserved for hffs def __init__( self , UpperCAmelCase = None , UpperCAmelCase = None , **UpperCAmelCase , ) -> Union[str, Any]: '''simple docstring''' super().__init__(self , **UpperCAmelCase ) lowercase_ = repo_info lowercase_ = token lowercase_ = None def A__ ( self ) -> Union[str, Any]: '''simple docstring''' if self.dir_cache is None: lowercase_ = {} for hf_file in self.repo_info.siblings: # TODO(QL): add sizes lowercase_ = { "name": hf_file.rfilename, "size": None, "type": "file", } self.dir_cache.update( { str(UpperCAmelCase ): {"name": str(UpperCAmelCase ), "size": None, "type": "directory"} for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1] } ) def A__ ( self , UpperCAmelCase , UpperCAmelCase = "rb" , **UpperCAmelCase , ) -> Optional[int]: '''simple docstring''' if not isinstance(self.repo_info , UpperCAmelCase ): raise NotImplementedError(F'Open is only implemented for dataset repositories, but got {self.repo_info}' ) lowercase_ = hf_hub_url(self.repo_info.id , UpperCAmelCase , revision=self.repo_info.sha ) return fsspec.open( UpperCAmelCase , mode=UpperCAmelCase , headers=get_authentication_headers_for_url(UpperCAmelCase , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open() def A__ ( self , UpperCAmelCase , **UpperCAmelCase ) -> List[str]: '''simple docstring''' self._get_dirs() lowercase_ = self._strip_protocol(UpperCAmelCase ) if path in self.dir_cache: return self.dir_cache[path] else: raise FileNotFoundError(UpperCAmelCase ) def A__ ( self , UpperCAmelCase , UpperCAmelCase=False , **UpperCAmelCase ) -> List[str]: '''simple docstring''' self._get_dirs() lowercase_ = PurePosixPath(path.strip("/" ) ) lowercase_ = {} for p, f in self.dir_cache.items(): lowercase_ = PurePosixPath(p.strip("/" ) ) lowercase_ = p.parent if root == path: lowercase_ = f lowercase_ = list(paths.values() ) if detail: return out else: return sorted(f["name"] for f in out )
297
import inspect import unittest from transformers import DecisionTransformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DecisionTransformerModel from transformers.models.decision_transformer.modeling_decision_transformer import ( DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) class __lowerCamelCase : """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=6 , UpperCAmelCase=17 , UpperCAmelCase=23 , UpperCAmelCase=11 , UpperCAmelCase=True , ) -> Tuple: '''simple docstring''' lowercase_ = parent lowercase_ = batch_size lowercase_ = seq_length lowercase_ = act_dim lowercase_ = state_dim lowercase_ = hidden_size lowercase_ = max_length lowercase_ = is_training def A__ ( self ) -> Dict: '''simple docstring''' lowercase_ = floats_tensor((self.batch_size, self.seq_length, self.state_dim) ) lowercase_ = floats_tensor((self.batch_size, self.seq_length, self.act_dim) ) lowercase_ = floats_tensor((self.batch_size, self.seq_length, 1) ) lowercase_ = floats_tensor((self.batch_size, self.seq_length, 1) ) lowercase_ = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 ) lowercase_ = random_attention_mask((self.batch_size, self.seq_length) ) lowercase_ = self.get_config() return ( config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ) def A__ ( self ) -> Optional[int]: '''simple docstring''' return DecisionTransformerConfig( batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , ) def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> Optional[int]: '''simple docstring''' lowercase_ = DecisionTransformerModel(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() lowercase_ = model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) self.parent.assertEqual(result.state_preds.shape , states.shape ) self.parent.assertEqual(result.action_preds.shape , actions.shape ) self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions def A__ ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ = self.prepare_config_and_inputs() ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) = config_and_inputs lowercase_ = { "states": states, "actions": actions, "rewards": rewards, "returns_to_go": returns_to_go, "timesteps": timesteps, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class __lowerCamelCase ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" lowerCAmelCase__ = (DecisionTransformerModel,) if is_torch_available() else () lowerCAmelCase__ = () lowerCAmelCase__ = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {} 
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids lowerCAmelCase__ = False # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def A__ ( self ) -> Dict: '''simple docstring''' lowercase_ = DecisionTransformerModelTester(self ) lowercase_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 ) def A__ ( self ) -> str: '''simple docstring''' self.config_tester.run_common_tests() def A__ ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase ) @slow def A__ ( self ) -> Tuple: '''simple docstring''' for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ = DecisionTransformerModel.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) def A__ ( self ) -> Any: '''simple docstring''' lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ = model_class(UpperCAmelCase ) lowercase_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase_ = [*signature.parameters.keys()] lowercase_ = [ "states", "actions", "rewards", "returns_to_go", "timesteps", "attention_mask", ] self.assertListEqual(arg_names[: len(UpperCAmelCase )] , UpperCAmelCase ) @require_torch class __lowerCamelCase ( unittest.TestCase ): """simple docstring""" @slow def A__ ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ = 2 # number of steps of autoregressive prediction we will perform lowercase_ = 10 # defined by the RL environment, may be normalized lowercase_ = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" ) lowercase_ = model.to(UpperCAmelCase ) lowercase_ = model.config torch.manual_seed(0 ) lowercase_ = torch.randn(1 , 1 , config.state_dim ).to(device=UpperCAmelCase , dtype=torch.floataa ) # env.reset() lowercase_ = torch.tensor( [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]] , device=UpperCAmelCase ) lowercase_ = torch.tensor(UpperCAmelCase , device=UpperCAmelCase , dtype=torch.floataa ).reshape(1 , 1 , 1 ) lowercase_ = state lowercase_ = torch.zeros(1 , 0 , config.act_dim , device=UpperCAmelCase , dtype=torch.floataa ) lowercase_ = torch.zeros(1 , 0 , device=UpperCAmelCase , dtype=torch.floataa ) lowercase_ = torch.tensor(0 , device=UpperCAmelCase , dtype=torch.long ).reshape(1 , 1 ) for step in range(UpperCAmelCase ): lowercase_ = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=UpperCAmelCase )] , dim=1 ) lowercase_ = torch.cat([rewards, torch.zeros(1 , 1 , device=UpperCAmelCase )] , dim=1 ) lowercase_ = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device ) with torch.no_grad(): lowercase_ , lowercase_ , lowercase_ = model( states=UpperCAmelCase , actions=UpperCAmelCase , rewards=UpperCAmelCase , returns_to_go=UpperCAmelCase , timesteps=UpperCAmelCase , attention_mask=UpperCAmelCase , return_dict=UpperCAmelCase , ) self.assertEqual(action_pred.shape , actions.shape ) self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) ) lowercase_ , 
lowercase_ , lowercase_ , lowercase_ = ( # env.step(action) torch.randn(1 , 1 , config.state_dim ).to(device=UpperCAmelCase , dtype=torch.floataa ), 1.0, False, {}, ) lowercase_ = action_pred[0, -1] lowercase_ = torch.cat([states, state] , dim=1 ) lowercase_ = returns_to_go[0, -1] - reward lowercase_ = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 ) lowercase_ = torch.cat( [timesteps, torch.ones((1, 1) , device=UpperCAmelCase , dtype=torch.long ) * (step + 1)] , dim=1 )
297
1
'''simple docstring''' import os import pytest from attr import dataclass _SCREAMING_SNAKE_CASE : str = "us-east-1" # defaults region @dataclass class _snake_case : lowerCAmelCase_ : str lowerCAmelCase_ : Optional[Any] = "arn:aws:iam::558105141721:role/sagemaker_execution_role" lowerCAmelCase_ : Optional[Any] = { "task_name": "mnli", "per_device_train_batch_size": 16, "per_device_eval_batch_size": 16, "do_train": True, "do_eval": True, "do_predict": True, "output_dir": "/opt/ml/model", "overwrite_output_dir": True, "max_steps": 500, "save_steps": 5500, } lowerCAmelCase_ : Optional[Any] = {**hyperparameters, "max_steps": 1000} @property def lowerCAmelCase__ ( self ) -> str: '''simple docstring''' if self.framework == "pytorch": return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}, {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"}, ] else: return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"}, {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"}, ] @property def lowerCAmelCase__ ( self ) -> str: '''simple docstring''' return F'{self.framework}-transfromers-test' @property def lowerCAmelCase__ ( self ) -> str: '''simple docstring''' return F'./tests/sagemaker/scripts/{self.framework}' @property def lowerCAmelCase__ ( self ) -> str: '''simple docstring''' if self.framework == "pytorch": return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04" else: return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04" @pytest.fixture(scope="class" ) def UpperCamelCase_( snake_case : Any ): '''simple docstring''' snake_case_ = SageMakerTestEnvironment(framework=request.cls.framework )
85
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict = defaultdict(int)

    # For each character in input strings,
    # increment count in the corresponding
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
49
0
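The anagram check above relies on a single counter that is incremented for characters of one string and decremented for the other, so anagrams cancel to all zeros. A short worked instance of the same technique:

from collections import defaultdict

# "listen" vs "silent": every increment from the first string is cancelled
# by a decrement from the second, so every count ends at zero.
count = defaultdict(int)
for a, b in zip("listen", "silent"):
    count[a] += 1
    count[b] -= 1
assert all(v == 0 for v in count.values())  # -> anagrams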
import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE_:List[str] = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""") @require_sentencepiece @require_tokenizers class SCREAMING_SNAKE_CASE__ ( a__ , unittest.TestCase ): '''simple docstring''' __lowerCamelCase : Optional[Any] = GPTSwaTokenizer __lowerCamelCase : Tuple = False __lowerCamelCase : List[str] = True __lowerCamelCase : List[Any] = False def _lowerCAmelCase ( self ): super().setUp() # We have a SentencePiece fixture for testing A : Optional[Any] = GPTSwaTokenizer(lowerCAmelCase__, eos_token="""<unk>""", bos_token="""<unk>""", pad_token="""<unk>""" ) tokenizer.save_pretrained(self.tmpdirname ) def _lowerCAmelCase ( self, lowerCamelCase__ ): A : Any = "This is a test" A : List[Any] = "This is a test" return input_text, output_text def _lowerCAmelCase ( self ): A : Optional[Any] = "<s>" A : List[str] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ), lowerCAmelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ), lowerCAmelCase__ ) def _lowerCAmelCase ( self ): A : Optional[int] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0], """<unk>""" ) self.assertEqual(vocab_keys[1], """<s>""" ) self.assertEqual(vocab_keys[-1], """j""" ) self.assertEqual(len(lowerCAmelCase__ ), 2000 ) def _lowerCAmelCase ( self ): self.assertEqual(self.get_tokenizer().vocab_size, 2000 ) def _lowerCAmelCase ( self ): A : str = GPTSwaTokenizer(lowerCAmelCase__ ) A : Dict = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(lowerCAmelCase__, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ), [465, 287, 265, 631, 842] ) A : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) # fmt: off self.assertListEqual( lowerCAmelCase__, ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""], ) # fmt: on A : List[str] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) self.assertListEqual( lowerCAmelCase__, [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260], ) A : Union[str, Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ ) # fmt: off self.assertListEqual( lowerCAmelCase__, ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] ) # fmt: on def _lowerCAmelCase ( self ): A : Optional[int] = GPTSwaTokenizer(lowerCAmelCase__ ) A : int = ["This is a test", "I was born in 92000, and this is falsé."] A : int = [ [465, 287, 265, 631, 842], [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260], ] # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids for text, expected_ids in zip(lowerCAmelCase__, lowerCAmelCase__ ): self.assertListEqual(tokenizer.encode_fast(lowerCAmelCase__ ), lowerCAmelCase__ ) # Test that decode_fast returns the input text for text, token_ids in zip(lowerCAmelCase__, 
lowerCAmelCase__ ): self.assertEqual(tokenizer.decode_fast(lowerCAmelCase__ ), lowerCAmelCase__ ) @slow def _lowerCAmelCase ( self ): A : Optional[Any] = [ "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')", "Hey there, how are you doing this fine day?", "This is a text with a trailing spaces followed by a dot .", "Häj sväjs lillebrör! =)", "Det är inget fel på Mr. Cool", ] # fmt: off A : Optional[Any] = {"input_ids": [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase__, model_name="""AI-Sweden/gpt-sw3-126m""", sequences=lowerCAmelCase__, )
363
from argparse import ArgumentParser

from .env import EnvironmentCommand


def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
115
0
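The `<0xC3>`/`<0xA9>` pieces in the tokenizer test above are SentencePiece byte fallback: a character missing from the vocabulary is emitted as its raw UTF-8 bytes. The encoding can be checked with plain Python, no tokenizer needed:

# "é" is absent from the toy vocabulary, so it falls back to its UTF-8 bytes,
# which the tokenizer surfaces as the pieces "<0xC3>" and "<0xA9>".
assert "é".encode("utf-8") == b"\xc3\xa9"
pieces = [f"<0x{b:02X}>" for b in "é".encode("utf-8")]
assert pieces == ["<0xC3>", "<0xA9>"]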
import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class _UpperCamelCase : '''simple docstring''' def __init__( self : Optional[Any] , a : str , a : Any=sys.maxsize ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE : str = "bilinear" SCREAMING_SNAKE_CASE : List[str] = max_size SCREAMING_SNAKE_CASE : int = short_edge_length def __call__( self : List[Any] , a : str ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE : int = [] for img in imgs: SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = img.shape[:2] # later: provide list and randomly choose index for resize SCREAMING_SNAKE_CASE : str = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img SCREAMING_SNAKE_CASE : str = size * 1.0 / min(a , a ) if h < w: SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : str = size, scale * w else: SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = scale * h, size if max(a , a ) > self.max_size: SCREAMING_SNAKE_CASE : Any = self.max_size * 1.0 / max(a , a ) SCREAMING_SNAKE_CASE : Optional[Any] = newh * scale SCREAMING_SNAKE_CASE : Any = neww * scale SCREAMING_SNAKE_CASE : Union[str, Any] = int(neww + 0.5 ) SCREAMING_SNAKE_CASE : Dict = int(newh + 0.5 ) if img.dtype == np.uinta: SCREAMING_SNAKE_CASE : Dict = Image.fromarray(a ) SCREAMING_SNAKE_CASE : Dict = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) SCREAMING_SNAKE_CASE : Tuple = np.asarray(a ) else: SCREAMING_SNAKE_CASE : List[str] = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw SCREAMING_SNAKE_CASE : Union[str, Any] = nn.functional.interpolate( a , (newh, neww) , mode=self.interp_method , align_corners=a ).squeeze(0 ) img_augs.append(a ) return img_augs class _UpperCamelCase : '''simple docstring''' def __init__( self : List[str] , a : int ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : int = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) SCREAMING_SNAKE_CASE : int = cfg.INPUT.FORMAT SCREAMING_SNAKE_CASE : Union[str, Any] = cfg.SIZE_DIVISIBILITY SCREAMING_SNAKE_CASE : str = cfg.PAD_VALUE SCREAMING_SNAKE_CASE : int = cfg.INPUT.MAX_SIZE_TEST SCREAMING_SNAKE_CASE : Any = cfg.MODEL.DEVICE SCREAMING_SNAKE_CASE : int = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) SCREAMING_SNAKE_CASE : Union[str, Any] = lambda a : (x - self.pixel_mean) / self.pixel_std def __UpperCamelCase ( self : Tuple , a : Tuple ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = tuple(max(a ) for s in zip(*[img.shape for img in images] ) ) SCREAMING_SNAKE_CASE : str = [im.shape[-2:] for im in images] SCREAMING_SNAKE_CASE : Union[str, Any] = [ nn.functional.pad( a , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(a , a ) ] return torch.stack(a ), torch.tensor(a ) def __call__( self : Optional[Any] , a : List[Any] , a : Dict=False ) -> Optional[Any]: """simple docstring""" with torch.no_grad(): if not isinstance(a , a ): SCREAMING_SNAKE_CASE : int = [images] if single_image: assert len(a ) == 1 for i in range(len(a ) ): if isinstance(images[i] , torch.Tensor ): images.insert(a , images.pop(a ).to(self.device ).float() 
) elif not isinstance(images[i] , torch.Tensor ): images.insert( a , torch.as_tensor(img_tensorize(images.pop(a ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest edge SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([im.shape[:2] for im in images] ) SCREAMING_SNAKE_CASE : Optional[int] = self.aug(a ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic SCREAMING_SNAKE_CASE : Optional[Any] = [self.normalizer(a ) for x in images] # now pad them to do the following operations SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Any = self.pad(a ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad SCREAMING_SNAKE_CASE : Tuple = torch.true_divide(a , a ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def lowerCamelCase__ ( _a , _a): boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def lowerCamelCase__ ( _a , _a): assert torch.isfinite(_a).all(), "Box tensor contains infinite or NaN!" SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Tuple = box_size tensor[:, 0].clamp_(min=0 , max=_a) tensor[:, 1].clamp_(min=0 , max=_a) tensor[:, 2].clamp_(min=0 , max=_a) tensor[:, 3].clamp_(min=0 , max=_a)
76
import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.dataset_dict import IterableDatasetDict from datasets.iterable_dataset import IterableDataset from datasets.load import dataset_module_factory, import_main_class from datasets.utils.file_utils import cached_path __lowerCAmelCase : Any = [ {'dataset': 'wikipedia', 'config_name': '20220301.de'}, {'dataset': 'wikipedia', 'config_name': '20220301.en'}, {'dataset': 'wikipedia', 'config_name': '20220301.fr'}, {'dataset': 'wikipedia', 'config_name': '20220301.frr'}, {'dataset': 'wikipedia', 'config_name': '20220301.it'}, {'dataset': 'wikipedia', 'config_name': '20220301.simple'}, {'dataset': 'snli', 'config_name': 'plain_text'}, {'dataset': 'eli5', 'config_name': 'LFQA_reddit'}, {'dataset': 'wiki40b', 'config_name': 'en'}, {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'}, {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'}, {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'}, {'dataset': 'natural_questions', 'config_name': 'default'}, ] def a__ ( A_=True ): '''simple docstring''' if with_config: return [ { "testcase_name": d["dataset"] + "/" + d["config_name"], "dataset": d["dataset"], "config_name": d["config_name"], } for d in DATASETS_ON_HF_GCP ] else: return [ {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP} ] @parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_A ) ) class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = None a__ = None def _lowercase ( self : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] ) -> Tuple: """simple docstring""" with TemporaryDirectory() as tmp_dir: __magic_name__ = dataset_module_factory(UpperCamelCase__ , cache_dir=UpperCamelCase__ ) __magic_name__ = import_main_class(dataset_module.module_path , dataset=UpperCamelCase__ ) __magic_name__ = builder_cls( cache_dir=UpperCamelCase__ , config_name=UpperCamelCase__ , hash=dataset_module.hash , ) __magic_name__ = """/""".join( [ HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=UpperCamelCase__ ).replace(os.sep , """/""" ), config.DATASET_INFO_FILENAME, ] ) __magic_name__ = cached_path(UpperCamelCase__ , cache_dir=UpperCamelCase__ ) self.assertTrue(os.path.exists(UpperCamelCase__ ) ) @pytest.mark.integration def a__ ( A_ ): '''simple docstring''' __magic_name__ = tmp_path_factory.mktemp("""test_hf_gcp""" ) / """test_wikipedia_simple""" __magic_name__ = dataset_module_factory("""wikipedia""", cache_dir=A_ ) __magic_name__ = import_main_class(dataset_module.module_path ) __magic_name__ = builder_cls( cache_dir=A_, config_name="""20220301.frr""", hash=dataset_module.hash, ) # use the HF cloud storage, not the original download_and_prepare that uses apache-beam __magic_name__ = None builder_instance.download_and_prepare() __magic_name__ = builder_instance.as_dataset() assert ds @pytest.mark.integration def a__ ( A_ ): '''simple docstring''' __magic_name__ = dataset_module_factory("""wikipedia""", cache_dir=A_ ) __magic_name__ = import_main_class(dataset_module.module_path, dataset=A_ ) __magic_name__ = builder_cls( cache_dir=A_, config_name="""20220301.frr""", hash=dataset_module.hash, ) __magic_name__ = builder_instance.as_streaming_dataset() assert ds assert isinstance(A_, A_ ) assert 
"train" in ds assert isinstance(ds["""train"""], A_ ) assert next(iter(ds["""train"""] ) )
88
0
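The `ResizeShortestEdge` logic in the row above scales the short side of an image to a target size and caps the long side at `max_size`. The arithmetic in isolation (a sketch with illustrative names):

def shortest_edge_size(h: int, w: int, size: int, max_size: int) -> tuple[int, int]:
    # scale so the shorter edge equals `size`
    scale = size * 1.0 / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    # cap the longer edge at `max_size`, rescaling both sides to keep the aspect ratio
    if max(newh, neww) > max_size:
        scale = max_size * 1.0 / max(newh, neww)
        newh, neww = newh * scale, neww * scale
    return int(newh + 0.5), int(neww + 0.5)

assert shortest_edge_size(480, 640, 240, 1000) == (240, 320)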
import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} # See all BART models at https://huggingface.co/models?filter=bart lowercase_ = { """vocab_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""", }, """merges_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""", }, } lowercase_ = { """facebook/bart-base""": 1024, """facebook/bart-large""": 1024, """facebook/bart-large-mnli""": 1024, """facebook/bart-large-cnn""": 1024, """facebook/bart-large-xsum""": 1024, """yjernite/bart_eli5""": 1024, } @lru_cache() def __lowerCAmelCase ( ): lowercase__ = ( list(range(ord("!" 
) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) lowercase__ = bs[:] lowercase__ = 0 for b in range(2**8 ): if b not in bs: bs.append(SCREAMING_SNAKE_CASE_ ) cs.append(2**8 + n ) n += 1 lowercase__ = [chr(SCREAMING_SNAKE_CASE_ ) for n in cs] return dict(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): lowercase__ = set() lowercase__ = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowercase__ = char return pairs class _snake_case ( lowercase__): UpperCamelCase__ : int =VOCAB_FILES_NAMES UpperCamelCase__ : List[Any] =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase__ : int =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase__ : Union[str, Any] =["""input_ids""", """attention_mask"""] def __init__( self : List[str], __lowercase : Any, __lowercase : Optional[int], __lowercase : Optional[Any]="replace", __lowercase : List[str]="<s>", __lowercase : Optional[Any]="</s>", __lowercase : Any="</s>", __lowercase : List[str]="<s>", __lowercase : Any="<unk>", __lowercase : int="<pad>", __lowercase : str="<mask>", __lowercase : Any=False, **__lowercase : Optional[Any], ): lowercase__ = AddedToken(__lowercase, lstrip=__lowercase, rstrip=__lowercase ) if isinstance(__lowercase, __lowercase ) else bos_token lowercase__ = AddedToken(__lowercase, lstrip=__lowercase, rstrip=__lowercase ) if isinstance(__lowercase, __lowercase ) else eos_token lowercase__ = AddedToken(__lowercase, lstrip=__lowercase, rstrip=__lowercase ) if isinstance(__lowercase, __lowercase ) else sep_token lowercase__ = AddedToken(__lowercase, lstrip=__lowercase, rstrip=__lowercase ) if isinstance(__lowercase, __lowercase ) else cls_token lowercase__ = AddedToken(__lowercase, lstrip=__lowercase, rstrip=__lowercase ) if isinstance(__lowercase, __lowercase ) else unk_token lowercase__ = AddedToken(__lowercase, lstrip=__lowercase, rstrip=__lowercase ) if isinstance(__lowercase, __lowercase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it lowercase__ = AddedToken(__lowercase, lstrip=__lowercase, rstrip=__lowercase ) if isinstance(__lowercase, __lowercase ) else mask_token super().__init__( errors=__lowercase, bos_token=__lowercase, eos_token=__lowercase, unk_token=__lowercase, sep_token=__lowercase, cls_token=__lowercase, pad_token=__lowercase, mask_token=__lowercase, add_prefix_space=__lowercase, **__lowercase, ) with open(__lowercase, encoding="utf-8" ) as vocab_handle: lowercase__ = json.load(__lowercase ) lowercase__ = {v: k for k, v in self.encoder.items()} lowercase__ = errors # how to handle errors in decoding lowercase__ = bytes_to_unicode() lowercase__ = {v: k for k, v in self.byte_encoder.items()} with open(__lowercase, encoding="utf-8" ) as merges_handle: lowercase__ = merges_handle.read().split("\n" )[1:-1] lowercase__ = [tuple(merge.split() ) for merge in bpe_merges] lowercase__ = dict(zip(__lowercase, range(len(__lowercase ) ) ) ) lowercase__ = {} lowercase__ = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowercase__ = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property def A__ ( self : str ): return len(self.encoder ) def A__ ( self : Optional[Any] ): return dict(self.encoder, **self.added_tokens_encoder ) def A__ ( self : Union[str, Any], __lowercase : Union[str, Any] ): if token in self.cache: return self.cache[token] lowercase__ = tuple(__lowercase ) lowercase__ = get_pairs(__lowercase ) if not pairs: return token while True: lowercase__ = min(__lowercase, key=lambda __lowercase : self.bpe_ranks.get(__lowercase, float("inf" ) ) ) if bigram not in self.bpe_ranks: break lowercase__ , lowercase__ = bigram lowercase__ = [] lowercase__ = 0 while i < len(__lowercase ): try: lowercase__ = word.index(__lowercase, __lowercase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowercase__ = j if word[i] == first and i < len(__lowercase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowercase__ = tuple(__lowercase ) lowercase__ = new_word if len(__lowercase ) == 1: break else: lowercase__ = get_pairs(__lowercase ) lowercase__ = " ".join(__lowercase ) lowercase__ = word return word def A__ ( self : Optional[Any], __lowercase : Dict ): lowercase__ = [] for token in re.findall(self.pat, __lowercase ): lowercase__ = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowercase ).split(" " ) ) return bpe_tokens def A__ ( self : int, __lowercase : Union[str, Any] ): return self.encoder.get(__lowercase, self.encoder.get(self.unk_token ) ) def A__ ( self : str, __lowercase : Dict ): return self.decoder.get(__lowercase ) def A__ ( self : List[Any], __lowercase : Optional[Any] ): lowercase__ = "".join(__lowercase ) lowercase__ = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8", errors=self.errors ) return text def A__ ( self : List[str], __lowercase : str, __lowercase : Optional[str] = None ): if not os.path.isdir(__lowercase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowercase__ = os.path.join( __lowercase, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) lowercase__ = os.path.join( __lowercase, (filename_prefix + "-" if 
filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(__lowercase, "w", encoding="utf-8" ) as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=__lowercase, ensure_ascii=__lowercase ) + "\n" ) lowercase__ = 0 with open(__lowercase, "w", encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda __lowercase : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' " Please check that the tokenizer is not corrupted!" ) lowercase__ = token_index writer.write(" ".join(__lowercase ) + "\n" ) index += 1 return vocab_file, merge_file def A__ ( self : Optional[int], __lowercase : List[int], __lowercase : Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowercase__ = [self.cls_token_id] lowercase__ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def A__ ( self : Dict, __lowercase : List[int], __lowercase : Optional[List[int]] = None, __lowercase : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowercase, token_ids_a=__lowercase, already_has_special_tokens=__lowercase ) if token_ids_a is None: return [1] + ([0] * len(__lowercase )) + [1] return [1] + ([0] * len(__lowercase )) + [1, 1] + ([0] * len(__lowercase )) + [1] def A__ ( self : List[str], __lowercase : List[int], __lowercase : Optional[List[int]] = None ): lowercase__ = [self.sep_token_id] lowercase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def A__ ( self : Optional[Any], __lowercase : int, __lowercase : List[str]=False, **__lowercase : Union[str, Any] ): lowercase__ = kwargs.pop("add_prefix_space", self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__lowercase ) > 0 and not text[0].isspace()): lowercase__ = " " + text return (text, kwargs)
224
from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""", } class _snake_case ( lowercase__): UpperCamelCase__ : Optional[Any] ="""transfo-xl""" UpperCamelCase__ : Dict =["""mems"""] UpperCamelCase__ : Optional[int] ={ """n_token""": """vocab_size""", """hidden_size""": """d_model""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self : Optional[Any], __lowercase : Optional[Any]=26_7735, __lowercase : int=[2_0000, 4_0000, 20_0000], __lowercase : Union[str, Any]=1024, __lowercase : Tuple=1024, __lowercase : Tuple=16, __lowercase : Optional[Any]=64, __lowercase : str=4096, __lowercase : Optional[int]=4, __lowercase : Union[str, Any]=False, __lowercase : Union[str, Any]=18, __lowercase : List[str]=1600, __lowercase : List[Any]=1000, __lowercase : Union[str, Any]=True, __lowercase : Tuple=True, __lowercase : Optional[Any]=0, __lowercase : List[str]=-1, __lowercase : int=True, __lowercase : Dict=0.1, __lowercase : Union[str, Any]=0.0, __lowercase : str=True, __lowercase : Optional[Any]="normal", __lowercase : str=0.01, __lowercase : Tuple=0.01, __lowercase : List[Any]=0.02, __lowercase : Any=1e-5, __lowercase : Union[str, Any]=0, **__lowercase : Union[str, Any], ): lowercase__ = vocab_size lowercase__ = [] self.cutoffs.extend(__lowercase ) if proj_share_all_but_first: lowercase__ = [False] + [True] * len(self.cutoffs ) else: lowercase__ = [False] + [False] * len(self.cutoffs ) lowercase__ = d_model lowercase__ = d_embed lowercase__ = d_head lowercase__ = d_inner lowercase__ = div_val lowercase__ = pre_lnorm lowercase__ = n_layer lowercase__ = n_head lowercase__ = mem_len lowercase__ = same_length lowercase__ = attn_type lowercase__ = clamp_len lowercase__ = sample_softmax lowercase__ = adaptive lowercase__ = dropout lowercase__ = dropatt lowercase__ = untie_r lowercase__ = init lowercase__ = init_range lowercase__ = proj_init_std lowercase__ = init_std lowercase__ = layer_norm_epsilon super().__init__(eos_token_id=__lowercase, **__lowercase ) @property def A__ ( self : Optional[Any] ): # Message copied from Transformer-XL documentation logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def A__ ( self : List[str], __lowercase : Union[str, Any] ): # Message copied from Transformer-XL documentation raise NotImplementedError( F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
224
1
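In the BPE tokenizer above, `get_pairs` drives the merge loop by enumerating adjacent symbol pairs, which are then ranked against the learned merges. A quick illustration of that helper on its own:

def get_pairs(word: tuple) -> set:
    # all adjacent symbol pairs in a word, e.g. for merge-rank lookup
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

assert get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}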
from math import isqrt


def calculate_prime_numbers(max_number: int) -> list[int]:
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    prime_numbers = calculate_prime_numbers(max_number // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count


if __name__ == "__main__":
    print(f"{solution() = }")
250
'''simple docstring''' import itertools import os import random import tempfile import unittest import numpy as np from datasets import load_dataset from transformers import is_speech_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import WhisperFeatureExtractor if is_torch_available(): import torch _snake_case = random.Random() def _A ( snake_case , snake_case=1.0 , snake_case=None , snake_case=None ) -> Optional[Any]: if rng is None: _lowercase : List[str] = global_rng _lowercase : Optional[Any] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class a__ ( unittest.TestCase ): def __init__( self , _UpperCamelCase , _UpperCamelCase=7 , _UpperCamelCase=400 , _UpperCamelCase=2000 , _UpperCamelCase=10 , _UpperCamelCase=160 , _UpperCamelCase=8 , _UpperCamelCase=0.0 , _UpperCamelCase=4000 , _UpperCamelCase=False , _UpperCamelCase=True , ): """simple docstring""" _lowercase : int = parent _lowercase : Optional[int] = batch_size _lowercase : List[Any] = min_seq_length _lowercase : Union[str, Any] = max_seq_length _lowercase : int = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) _lowercase : Union[str, Any] = padding_value _lowercase : Dict = sampling_rate _lowercase : Any = return_attention_mask _lowercase : Union[str, Any] = do_normalize _lowercase : int = feature_size _lowercase : str = chunk_length _lowercase : Any = hop_length def _lowerCamelCase ( self ): """simple docstring""" return { "feature_size": self.feature_size, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def _lowerCamelCase ( self , _UpperCamelCase=False , _UpperCamelCase=False ): """simple docstring""" def _flatten(_UpperCamelCase ): return list(itertools.chain(*_UpperCamelCase ) ) if equal_length: _lowercase : List[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size _lowercase : Optional[int] = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: _lowercase : Optional[Any] = [np.asarray(_UpperCamelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class a__ ( lowerCamelCase_ , unittest.TestCase ): _SCREAMING_SNAKE_CASE : Tuple = WhisperFeatureExtractor if is_speech_available() else None def _lowerCamelCase ( self ): """simple docstring""" _lowercase : Union[str, Any] = WhisperFeatureExtractionTester(self ) def _lowerCamelCase ( self ): """simple docstring""" _lowercase : List[str] = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _lowercase : List[Any] = feat_extract_first.save_pretrained(_UpperCamelCase )[0] check_json_file_has_correct_format(_UpperCamelCase ) _lowercase : Tuple = self.feature_extraction_class.from_pretrained(_UpperCamelCase ) _lowercase : List[Any] = feat_extract_first.to_dict() _lowercase : List[str] = feat_extract_second.to_dict() _lowercase : Tuple = 
feat_extract_first.mel_filters _lowercase : List[str] = feat_extract_second.mel_filters self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase ) ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) def _lowerCamelCase ( self ): """simple docstring""" _lowercase : List[Any] = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _lowercase : Optional[int] = os.path.join(_UpperCamelCase , "feat_extract.json" ) feat_extract_first.to_json_file(_UpperCamelCase ) _lowercase : Any = self.feature_extraction_class.from_json_file(_UpperCamelCase ) _lowercase : List[Any] = feat_extract_first.to_dict() _lowercase : str = feat_extract_second.to_dict() _lowercase : List[str] = feat_extract_first.mel_filters _lowercase : Optional[Any] = feat_extract_second.mel_filters self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase ) ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) def _lowerCamelCase ( self ): """simple docstring""" _lowercase : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 _lowercase : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _lowercase : Optional[Any] = [np.asarray(_UpperCamelCase ) for speech_input in speech_inputs] # Test feature size _lowercase : int = feature_extractor(_UpperCamelCase , padding="max_length" , return_tensors="np" ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames ) self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size ) # Test not batched input _lowercase : List[str] = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features _lowercase : str = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 ) ) # Test batched _lowercase : Dict = feature_extractor(_UpperCamelCase , return_tensors="np" ).input_features _lowercase : Optional[Any] = feature_extractor(_UpperCamelCase , return_tensors="np" ).input_features for enc_seq_a, enc_seq_a in zip(_UpperCamelCase , _UpperCamelCase ): self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. 
_lowercase : Optional[int] = [floats_list((1, x) )[0] for x in (800, 800, 800)] _lowercase : List[str] = np.asarray(_UpperCamelCase ) _lowercase : Optional[Any] = feature_extractor(_UpperCamelCase , return_tensors="np" ).input_features _lowercase : str = feature_extractor(_UpperCamelCase , return_tensors="np" ).input_features for enc_seq_a, enc_seq_a in zip(_UpperCamelCase , _UpperCamelCase ): self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 ) ) # Test truncation required _lowercase : List[Any] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )] _lowercase : List[str] = [np.asarray(_UpperCamelCase ) for speech_input in speech_inputs] _lowercase : Any = [x[: feature_extractor.n_samples] for x in speech_inputs] _lowercase : Any = [np.asarray(_UpperCamelCase ) for speech_input in speech_inputs_truncated] _lowercase : List[str] = feature_extractor(_UpperCamelCase , return_tensors="np" ).input_features _lowercase : Union[str, Any] = feature_extractor(_UpperCamelCase , return_tensors="np" ).input_features for enc_seq_a, enc_seq_a in zip(_UpperCamelCase , _UpperCamelCase ): self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 ) ) def _lowerCamelCase ( self ): """simple docstring""" import torch _lowercase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _lowercase : Optional[Any] = np.random.rand(100 , 32 ).astype(np.floataa ) _lowercase : Dict = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: _lowercase : Optional[int] = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) _lowercase : Optional[int] = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def _lowerCamelCase ( self , _UpperCamelCase ): """simple docstring""" _lowercase : int = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" ) # automatic decoding with librispeech _lowercase : Optional[int] = ds.sort("id" ).select(range(_UpperCamelCase ) )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def _lowerCamelCase ( self ): """simple docstring""" _lowercase : str = torch.tensor( [ 0.1_1_9_3, -0.0_9_4_6, -0.1_0_9_8, -0.0_1_9_6, 0.0_2_2_5, -0.0_6_9_0, -0.1_7_3_6, 0.0_9_5_1, 0.0_9_7_1, -0.0_8_1_7, -0.0_7_0_2, 0.0_1_6_2, 0.0_2_6_0, 0.0_0_1_7, -0.0_1_9_2, -0.1_6_7_8, 0.0_7_0_9, -0.1_8_6_7, -0.0_6_5_5, -0.0_2_7_4, -0.0_2_3_4, -0.1_8_8_4, -0.0_5_1_6, -0.0_5_5_4, -0.0_2_7_4, -0.1_4_2_5, -0.1_4_2_3, 0.0_8_3_7, 0.0_3_7_7, -0.0_8_5_4 ] ) # fmt: on _lowercase : str = self._load_datasamples(1 ) _lowercase : Union[str, Any] = WhisperFeatureExtractor() _lowercase : Any = feature_extractor(_UpperCamelCase , return_tensors="pt" ).input_features self.assertEqual(input_features.shape , (1, 80, 3000) ) self.assertTrue(torch.allclose(input_features[0, 0, :30] , _UpperCamelCase , atol=1E-4 ) ) def _lowerCamelCase ( self ): """simple docstring""" _lowercase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _lowercase : str = self._load_datasamples(1 )[0] _lowercase : List[str] = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535 # Rescale to [0, 65535] to show issue _lowercase : Optional[int] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=_UpperCamelCase )[0] 
self.assertTrue(np.all(np.mean(_UpperCamelCase ) < 1E-3 ) ) self.assertTrue(np.all(np.abs(np.var(_UpperCamelCase ) - 1 ) < 1E-3 ) )
250
1
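The two-pointer count in the Euler solution above can be checked against a brute force on a small bound; for max_number = 30 it should find ten semiprimes:

def is_prime(n: int) -> bool:
    return n > 1 and all(n % d for d in range(2, int(n**0.5) + 1))

# brute-force count of semiprimes (products of exactly two primes) below 30
semiprimes = {p * q for p in range(2, 15) for q in range(p, 15)
              if is_prime(p) and is_prime(q) and p * q < 30}
assert len(semiprimes) == 10  # {4, 6, 9, 10, 14, 15, 21, 22, 25, 26}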
"""simple docstring""" from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING lowerCamelCase__ = logging.get_logger(__name__) @add_end_docstrings(_lowerCamelCase) class A__ ( _lowerCamelCase): def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=None ): __lowerCAmelCase : Tuple = {} if top_k is not None: __lowerCAmelCase : Any = top_k return {}, {}, postprocess_params def __call__( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): return super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Optional[int] = load_image(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) return model_inputs def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Dict = self.model(**_SCREAMING_SNAKE_CASE ) return model_outputs def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=5 ): if top_k > self.model.config.num_labels: __lowerCAmelCase : List[Any] = self.model.config.num_labels if self.framework == "pt": __lowerCAmelCase : str = model_outputs.logits.softmax(-1 )[0] __lowerCAmelCase , __lowerCAmelCase : Optional[int] = probs.topk(_SCREAMING_SNAKE_CASE ) elif self.framework == "tf": __lowerCAmelCase : Dict = stable_softmax(model_outputs.logits , axis=-1 )[0] __lowerCAmelCase : str = tf.math.top_k(_SCREAMING_SNAKE_CASE , k=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : Optional[int] = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(f"Unsupported framework: {self.framework}" ) __lowerCAmelCase : int = scores.tolist() __lowerCAmelCase : Optional[Any] = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )]
182
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class A__ ( _lowerCamelCase , unittest.TestCase): A_ : Optional[int] = ShapEPipeline A_ : str = ['prompt'] A_ : Any = ['prompt'] A_ : List[Any] = [ 'num_images_per_prompt', 'num_inference_steps', 'generator', 'latents', 'guidance_scale', 'frame_size', 'output_type', 'return_dict', ] A_ : Optional[int] = False @property def __lowerCamelCase ( self ): return 32 @property def __lowerCamelCase ( self ): return 32 @property def __lowerCamelCase ( self ): return self.time_input_dim * 4 @property def __lowerCamelCase ( self ): return 8 @property def __lowerCamelCase ( self ): __lowerCAmelCase : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def __lowerCamelCase ( self ): torch.manual_seed(0 ) __lowerCAmelCase : str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) return CLIPTextModelWithProjection(_SCREAMING_SNAKE_CASE ) @property def __lowerCamelCase ( self ): torch.manual_seed(0 ) __lowerCAmelCase : Optional[Any] = { 'num_attention_heads': 2, 'attention_head_dim': 16, 'embedding_dim': self.time_input_dim, 'num_embeddings': 32, 'embedding_proj_dim': self.text_embedder_hidden_size, 'time_embed_dim': self.time_embed_dim, 'num_layers': 1, 'clip_embed_dim': self.time_input_dim * 2, 'additional_embeddings': 0, 'time_embed_act_fn': 'gelu', 'norm_in_type': 'layer', 'encoder_hid_proj_type': None, 'added_emb_type': None, } __lowerCAmelCase : int = PriorTransformer(**_SCREAMING_SNAKE_CASE ) return model @property def __lowerCamelCase ( self ): torch.manual_seed(0 ) __lowerCAmelCase : Dict = { 'param_shapes': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), 'd_latent': self.time_input_dim, 'd_hidden': self.renderer_dim, 'n_output': 12, 'background': ( 0.1, 0.1, 0.1, ), } __lowerCAmelCase : Union[str, Any] = ShapERenderer(**_SCREAMING_SNAKE_CASE ) return model def __lowerCamelCase ( self ): __lowerCAmelCase : List[Any] = self.dummy_prior __lowerCAmelCase : str = self.dummy_text_encoder __lowerCAmelCase : List[Any] = self.dummy_tokenizer __lowerCAmelCase : str = self.dummy_renderer __lowerCAmelCase : List[Any] = HeunDiscreteScheduler( beta_schedule='exp' , num_train_timesteps=10_24 , prediction_type='sample' , use_karras_sigmas=_SCREAMING_SNAKE_CASE , clip_sample=_SCREAMING_SNAKE_CASE , clip_sample_range=1.0 , ) __lowerCAmelCase : int = { 'prior': prior, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'renderer': renderer, 'scheduler': scheduler, } return components def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ): if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ): __lowerCAmelCase : int = torch.manual_seed(_SCREAMING_SNAKE_CASE ) else: __lowerCAmelCase : Dict = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE ) 
__lowerCAmelCase : Optional[Any] = { 'prompt': 'horse', 'generator': generator, 'num_inference_steps': 1, 'frame_size': 32, 'output_type': 'np', } return inputs def __lowerCamelCase ( self ): __lowerCAmelCase : Any = 'cpu' __lowerCAmelCase : int = self.get_dummy_components() __lowerCAmelCase : Union[str, Any] = self.pipeline_class(**_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = pipe.to(_SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = pipe(**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) ) __lowerCAmelCase : Optional[Any] = output.images[0] __lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __lowerCAmelCase : str = np.array( [ 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCamelCase ( self ): # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __lowerCamelCase ( self ): __lowerCAmelCase : str = torch_device == 'cpu' __lowerCAmelCase : str = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=_SCREAMING_SNAKE_CASE , relax_max_difference=_SCREAMING_SNAKE_CASE , ) def __lowerCamelCase ( self ): __lowerCAmelCase : str = self.get_dummy_components() __lowerCAmelCase : List[Any] = self.pipeline_class(**_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = pipe.to(_SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = 1 __lowerCAmelCase : List[Any] = 2 __lowerCAmelCase : int = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) for key in inputs.keys(): if key in self.batch_params: __lowerCAmelCase : Dict = batch_size * [inputs[key]] __lowerCAmelCase : Any = pipe(**_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class A__ ( unittest.TestCase): def __lowerCamelCase ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCamelCase ( self ): __lowerCAmelCase : List[Any] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/test_shap_e_np_out.npy' ) __lowerCAmelCase : Dict = ShapEPipeline.from_pretrained('openai/shap-e' ) __lowerCAmelCase : int = pipe.to(_SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 ) __lowerCAmelCase : List[str] = pipe( 'a shark' , generator=_SCREAMING_SNAKE_CASE , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
182
1
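The postprocessing in the image-classification pipeline above takes a softmax over the logits and keeps the top-k entries. The same two operations in isolation:

import torch

logits = torch.tensor([[2.0, 0.5, 1.0]])
probs = logits.softmax(-1)[0]   # normalize logits to class probabilities
scores, ids = probs.topk(2)     # keep the two most likely classes
assert ids.tolist() == [0, 2]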
def jaccard_similarity(set_a, set_b, alternative_union=False):
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
101
import argparse
import math
import traceback

import dateutil.parser as date_parser
import requests


def extract_time_from_single_job(job):
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f"{k}: {v['duration']}")
127
0
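For the two sets used in the Jaccard demo above, the result works out by hand as follows:

set_a = {"a", "b", "c", "d", "e"}
set_b = {"c", "d", "e", "f", "h", "i"}
# |intersection| = |{c, d, e}| = 3, |union| = |{a, b, c, d, e, f, h, i}| = 8
assert len(set_a & set_b) / len(set_a | set_b) == 0.375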
from string import ascii_uppercase

ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")

    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])

    return new_value[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for base in range(2, 37):
        for num in range(1000):
            assert int(decimal_to_any(num, base), base) == num, (
                num,
                base,
                decimal_to_any(num, base),
                int(decimal_to_any(num, base), base),
            )
162
from typing import Any def a__ ( A__, A__, A__, A__, A__, ): _validation( A__, A__, A__, A__, A__, ) # Creates data structures and fill initial step SCREAMING_SNAKE_CASE_ : dict = {} SCREAMING_SNAKE_CASE_ : dict = {} for state in states_space: SCREAMING_SNAKE_CASE_ : int = observations_space[0] SCREAMING_SNAKE_CASE_ : Union[str, Any] = ( initial_probabilities[state] * emission_probabilities[state][observation] ) SCREAMING_SNAKE_CASE_ : str = None # Fills the data structure with the probabilities of # different transitions and pointers to previous states for o in range(1, len(A__ ) ): SCREAMING_SNAKE_CASE_ : List[str] = observations_space[o] SCREAMING_SNAKE_CASE_ : str = observations_space[o - 1] for state in states_space: # Calculates the argmax for probability function SCREAMING_SNAKE_CASE_ : Union[str, Any] = '' SCREAMING_SNAKE_CASE_ : str = -1 for k_state in states_space: SCREAMING_SNAKE_CASE_ : List[str] = ( probabilities[(k_state, prior_observation)] * transition_probabilities[k_state][state] * emission_probabilities[state][observation] ) if probability > max_probability: SCREAMING_SNAKE_CASE_ : Tuple = probability SCREAMING_SNAKE_CASE_ : Optional[int] = k_state # Update probabilities and pointers dicts SCREAMING_SNAKE_CASE_ : List[Any] = ( probabilities[(arg_max, prior_observation)] * transition_probabilities[arg_max][state] * emission_probabilities[state][observation] ) SCREAMING_SNAKE_CASE_ : Tuple = arg_max # The final observation SCREAMING_SNAKE_CASE_ : Optional[int] = observations_space[len(A__ ) - 1] # argmax for given final observation SCREAMING_SNAKE_CASE_ : Union[str, Any] = '' SCREAMING_SNAKE_CASE_ : List[str] = -1 for k_state in states_space: SCREAMING_SNAKE_CASE_ : int = probabilities[(k_state, final_observation)] if probability > max_probability: SCREAMING_SNAKE_CASE_ : List[Any] = probability SCREAMING_SNAKE_CASE_ : Tuple = k_state SCREAMING_SNAKE_CASE_ : Optional[Any] = arg_max # Process pointers backwards SCREAMING_SNAKE_CASE_ : Union[str, Any] = last_state SCREAMING_SNAKE_CASE_ : List[str] = [] for o in range(len(A__ ) - 1, -1, -1 ): result.append(A__ ) SCREAMING_SNAKE_CASE_ : Tuple = pointers[previous, observations_space[o]] result.reverse() return result def a__ ( A__, A__, A__, A__, A__, ): _validate_not_empty( A__, A__, A__, A__, A__, ) _validate_lists(A__, A__ ) _validate_dicts( A__, A__, A__ ) def a__ ( A__, A__, A__, A__, A__, ): if not all( [ observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, ] ): raise ValueError('There\'s an empty parameter' ) def a__ ( A__, A__ ): _validate_list(A__, 'observations_space' ) _validate_list(A__, 'states_space' ) def a__ ( A__, A__ ): if not isinstance(_object, A__ ): SCREAMING_SNAKE_CASE_ : List[str] = F'''{var_name} must be a list''' raise ValueError(A__ ) else: for x in _object: if not isinstance(A__, A__ ): SCREAMING_SNAKE_CASE_ : Dict = F'''{var_name} must be a list of strings''' raise ValueError(A__ ) def a__ ( A__, A__, A__, ): _validate_dict(A__, 'initial_probabilities', A__ ) _validate_nested_dict(A__, 'transition_probabilities' ) _validate_nested_dict(A__, 'emission_probabilities' ) def a__ ( A__, A__ ): _validate_dict(_object, A__, A__ ) for x in _object.values(): _validate_dict(A__, A__, A__, A__ ) def a__ ( A__, A__, A__, A__ = False ): if not isinstance(_object, A__ ): SCREAMING_SNAKE_CASE_ : Dict = F'''{var_name} must be a dict''' raise ValueError(A__ ) if not all(isinstance(A__, A__ ) for x in _object ): SCREAMING_SNAKE_CASE_ : Optional[int] = 
F'''{var_name} all keys must be strings''' raise ValueError(A__ ) if not all(isinstance(A__, A__ ) for x in _object.values() ): SCREAMING_SNAKE_CASE_ : Optional[Any] = 'nested dictionary ' if nested else '' SCREAMING_SNAKE_CASE_ : Any = F'''{var_name} {nested_text}all values must be {value_type.__name__}''' raise ValueError(A__ ) if __name__ == "__main__": from doctest import testmod testmod()
162
1
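A couple of spot checks for the base conversion in the row above (repeated divmod collects digits least-significant first, then the string is reversed):

# 255 in base 16: 255 = 15*16 + 15 -> digits F, F (reversed: "FF")
assert format(255, "X") == "FF"
# 42 in base 2: 42 = 0b101010
assert format(42, "b") == "101010"
# round trip sanity, as in the __main__ check above
assert int("FF", 16) == 255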
"""simple docstring""" import argparse from pathlib import Path import torch from transformers import OPTConfig, OPTModel from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ = logging.get_logger(__name__) def a__ ( SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' lowerCAmelCase : int = torch.load(SCREAMING_SNAKE_CASE , map_location="cpu" ) if "model" in sd.keys(): lowerCAmelCase : Union[str, Any] = torch.load(SCREAMING_SNAKE_CASE , map_location="cpu" )["model"] # pop unnecessary weights lowerCAmelCase : Optional[Any] = [ "decoder.version", "decoder.output_projection.weight", ] for key in keys_to_delete: if key in sd: sd.pop(SCREAMING_SNAKE_CASE ) lowerCAmelCase : List[str] = { "decoder.project_in_dim.weight": "decoder.project_in.weight", "decoder.project_out_dim.weight": "decoder.project_out.weight", "decoder.layer_norm.weight": "decoder.final_layer_norm.weight", "decoder.layer_norm.bias": "decoder.final_layer_norm.bias", } for old_key, new_key in keys_to_rename.items(): if old_key in sd: lowerCAmelCase : Any = sd.pop(SCREAMING_SNAKE_CASE ) lowerCAmelCase : List[str] = list(sd.keys() ) for key in keys: if ".qkv_proj." in key: lowerCAmelCase : Dict = sd[key] # We split QKV in separate Q,K,V lowerCAmelCase : Optional[Any] = key.replace(".qkv_proj." , ".q_proj." ) lowerCAmelCase : str = key.replace(".qkv_proj." , ".k_proj." ) lowerCAmelCase : Tuple = key.replace(".qkv_proj." , ".v_proj." ) lowerCAmelCase : List[str] = value.shape[0] assert depth % 3 == 0 # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming: # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97 lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[Any] = torch.split(SCREAMING_SNAKE_CASE , depth // 3 , dim=0 ) lowerCAmelCase : List[Any] = q lowerCAmelCase : str = k lowerCAmelCase : Any = v del sd[key] return sd @torch.no_grad() def a__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple=None ): '''simple docstring''' lowerCAmelCase : Any = load_checkpoint(SCREAMING_SNAKE_CASE ) if config is not None: lowerCAmelCase : Optional[Any] = OPTConfig.from_pretrained(SCREAMING_SNAKE_CASE ) else: lowerCAmelCase : Optional[int] = OPTConfig() lowerCAmelCase : Any = OPTModel(SCREAMING_SNAKE_CASE ).half().eval() model.load_state_dict(SCREAMING_SNAKE_CASE ) # Check results Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE ) model.save_pretrained(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--fairseq_path''', type=str, help=( '''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:''' ''' https://huggingface.co/models?other=opt_metasq''' ), ) parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''') lowerCAmelCase__ = parser.parse_args() convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
108
"""simple docstring""" from __future__ import annotations _lowercase : Dict = 1.6_021E-19 # units = C def snake_case__ ( __lowerCamelCase : float , __lowerCamelCase : float , __lowerCamelCase : float , ): """simple docstring""" if (conductivity, electron_conc, mobility).count(0 ) != 1: raise ValueError('''You cannot supply more or less than 2 values''' ) elif conductivity < 0: raise ValueError('''Conductivity cannot be negative''' ) elif electron_conc < 0: raise ValueError('''Electron concentration cannot be negative''' ) elif mobility < 0: raise ValueError('''mobility cannot be negative''' ) elif conductivity == 0: return ( "conductivity", mobility * electron_conc * ELECTRON_CHARGE, ) elif electron_conc == 0: return ( "electron_conc", conductivity / (mobility * ELECTRON_CHARGE), ) else: return ( "mobility", conductivity / (electron_conc * ELECTRON_CHARGE), ) if __name__ == "__main__": import doctest doctest.testmod()
238
0
import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def __lowercase ( a__ ) -> Dict: if isinstance(UpperCAmelCase__ , collections.abc.Iterable ): return x return (x, x) @require_flax class UpperCAmelCase_ : '''simple docstring''' def _A ( self , _A , _A ): '''simple docstring''' pass def _A ( self ): '''simple docstring''' pass def _A ( self ): '''simple docstring''' pass def _A ( self , _A , _A , _A ): '''simple docstring''' __SCREAMING_SNAKE_CASE = np.abs((a - b) ).max() self.assertLessEqual(UpperCamelCase__ , UpperCamelCase__ , f"""Difference between torch and flax is {diff} (>= {tol}).""" ) def _A ( self , _A , _A , _A , _A , _A=None , **_A ): '''simple docstring''' __SCREAMING_SNAKE_CASE = VisionTextDualEncoderConfig.from_vision_text_configs(UpperCamelCase__ , UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = FlaxVisionTextDualEncoderModel(UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = model(input_ids=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ ) self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) ) def _A ( self , _A , _A , _A , _A , _A=None , **_A ): '''simple docstring''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.get_vision_text_model(UpperCamelCase__ , UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = {'vision_model': vision_model, 'text_model': text_model} __SCREAMING_SNAKE_CASE = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = model(input_ids=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ ) self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) ) def _A ( self , _A , _A , _A , _A , _A=None , **_A ): '''simple docstring''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.get_vision_text_model(UpperCamelCase__ , UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = {'vision_model': vision_model, 'text_model': text_model} __SCREAMING_SNAKE_CASE = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = model(input_ids=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = 
FlaxVisionTextDualEncoderModel.from_pretrained(UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = model(input_ids=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = after_output[0] __SCREAMING_SNAKE_CASE = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(UpperCamelCase__ , 1e-3 ) def _A ( self , _A , _A , _A , _A , _A=None , **_A ): '''simple docstring''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.get_vision_text_model(UpperCamelCase__ , UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = {'vision_model': vision_model, 'text_model': text_model} __SCREAMING_SNAKE_CASE = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = model( input_ids=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , output_attentions=UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = output.vision_model_output.attentions self.assertEqual(len(UpperCamelCase__ ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) __SCREAMING_SNAKE_CASE = to_atuple(vision_model.config.image_size ) __SCREAMING_SNAKE_CASE = to_atuple(vision_model.config.patch_size ) __SCREAMING_SNAKE_CASE = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) __SCREAMING_SNAKE_CASE = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) __SCREAMING_SNAKE_CASE = output.text_model_output.attentions self.assertEqual(len(UpperCamelCase__ ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def _A ( self , _A , _A , _A ): '''simple docstring''' pt_model.to(UpperCamelCase__ ) pt_model.eval() # prepare inputs __SCREAMING_SNAKE_CASE = inputs_dict __SCREAMING_SNAKE_CASE = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): __SCREAMING_SNAKE_CASE = pt_model(**UpperCamelCase__ ).to_tuple() __SCREAMING_SNAKE_CASE = fx_model(**UpperCamelCase__ ).to_tuple() self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ): self.assert_almost_equals(UpperCamelCase__ , pt_output.numpy() , 4e-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = FlaxVisionTextDualEncoderModel.from_pretrained(UpperCamelCase__ , from_pt=UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = fx_model_loaded(**UpperCamelCase__ ).to_tuple() self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ): self.assert_almost_equals(UpperCamelCase__ , pt_output.numpy() , 4e-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = VisionTextDualEncoderModel.from_pretrained(UpperCamelCase__ , from_flax=UpperCamelCase__ ) pt_model_loaded.to(UpperCamelCase__ ) pt_model_loaded.eval() with torch.no_grad(): __SCREAMING_SNAKE_CASE = pt_model_loaded(**UpperCamelCase__ ).to_tuple() self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ): 
self.assert_almost_equals(UpperCamelCase__ , pt_output_loaded.numpy() , 4e-2 ) def _A ( self , _A , _A , _A ): '''simple docstring''' __SCREAMING_SNAKE_CASE = VisionTextDualEncoderConfig.from_vision_text_configs(UpperCamelCase__ , UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = VisionTextDualEncoderModel(UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = FlaxVisionTextDualEncoderModel(UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = fx_state self.check_pt_flax_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def _A ( self , _A , _A , _A ): '''simple docstring''' __SCREAMING_SNAKE_CASE = VisionTextDualEncoderConfig.from_vision_text_configs(UpperCamelCase__ , UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = VisionTextDualEncoderModel(UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = FlaxVisionTextDualEncoderModel(UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = load_flax_weights_in_pytorch_model(UpperCamelCase__ , fx_model.params ) self.check_pt_flax_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def _A ( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**UpperCamelCase__ ) def _A ( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**UpperCamelCase__ ) def _A ( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() self.check_save_load(**UpperCamelCase__ ) def _A ( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**UpperCamelCase__ ) @is_pt_flax_cross_test def _A ( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE = config_inputs_dict.pop('vision_config' ) __SCREAMING_SNAKE_CASE = config_inputs_dict.pop('text_config' ) __SCREAMING_SNAKE_CASE = config_inputs_dict self.check_equivalence_pt_to_flax(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) self.check_equivalence_flax_to_pt(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) @slow def _A ( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.get_pretrained_model_and_inputs() __SCREAMING_SNAKE_CASE = model_a(**UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = FlaxVisionTextDualEncoderModel.from_pretrained(UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = model_a(**UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = after_outputs[0] __SCREAMING_SNAKE_CASE = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(UpperCamelCase__ , 1e-5 ) @require_flax class UpperCAmelCase_ ( _snake_case , unittest.TestCase ): '''simple docstring''' def _A ( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( 'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-bert' , vision_from_pt=UpperCamelCase__ , text_from_pt=UpperCamelCase__ , ) __SCREAMING_SNAKE_CASE = 13 __SCREAMING_SNAKE_CASE = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) __SCREAMING_SNAKE_CASE = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) __SCREAMING_SNAKE_CASE = 
random_attention_mask([batch_size, 4] ) __SCREAMING_SNAKE_CASE = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, inputs def _A ( self , _A , _A ): '''simple docstring''' __SCREAMING_SNAKE_CASE = FlaxViTModel(UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = FlaxBertModel(UpperCamelCase__ ) return vision_model, text_model def _A ( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = FlaxViTModelTester(self ) __SCREAMING_SNAKE_CASE = FlaxBertModelTester(self ) __SCREAMING_SNAKE_CASE = vit_model_tester.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE = bert_model_tester.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = vision_config_and_inputs __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class UpperCAmelCase_ ( _snake_case , unittest.TestCase ): '''simple docstring''' def _A ( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( 'hf-internal-testing/tiny-random-clip' , 'hf-internal-testing/tiny-bert' , vision_from_pt=UpperCamelCase__ , text_from_pt=UpperCamelCase__ , ) __SCREAMING_SNAKE_CASE = 13 __SCREAMING_SNAKE_CASE = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) __SCREAMING_SNAKE_CASE = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) __SCREAMING_SNAKE_CASE = random_attention_mask([batch_size, 4] ) __SCREAMING_SNAKE_CASE = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, inputs def _A ( self , _A , _A ): '''simple docstring''' __SCREAMING_SNAKE_CASE = FlaxCLIPVisionModel(UpperCamelCase__ ) __SCREAMING_SNAKE_CASE = FlaxBertModel(UpperCamelCase__ ) return vision_model, text_model def _A ( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = FlaxCLIPVisionModelTester(self ) __SCREAMING_SNAKE_CASE = FlaxBertModelTester(self ) __SCREAMING_SNAKE_CASE = clip_model_tester.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE = bert_model_tester.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = vision_config_and_inputs __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _A ( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian' , logit_scale_init_value=1.0 ) __SCREAMING_SNAKE_CASE = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' ) __SCREAMING_SNAKE_CASE = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) __SCREAMING_SNAKE_CASE = processor( text=['una foto di un gatto', 'una foto di un cane'] , images=UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors='np' ) __SCREAMING_SNAKE_CASE 
= model(**UpperCamelCase__ ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) __SCREAMING_SNAKE_CASE = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] ) self.assertTrue(np.allclose(outputs.logits_per_image , UpperCamelCase__ , atol=1e-3 ) )
363
import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .tokenization_wavaveca import WavaVecaCTCTokenizer class UpperCAmelCase_ ( UpperCamelCase_ ): '''simple docstring''' UpperCamelCase__ : Union[str, Any] = '''Wav2Vec2FeatureExtractor''' UpperCamelCase__ : Union[str, Any] = '''AutoTokenizer''' def __init__( self , _A , _A ): '''simple docstring''' super().__init__(_A , _A ) __SCREAMING_SNAKE_CASE = self.feature_extractor __SCREAMING_SNAKE_CASE = False @classmethod def _A ( cls , _A , **_A ): '''simple docstring''' try: return super().from_pretrained(_A , **_A ) except OSError: warnings.warn( f"""Loading a tokenizer inside {cls.__name__} from a config that does not""" ' include a `tokenizer_class` attribute is deprecated and will be ' 'removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`' ' attribute to either your `config.json` or `tokenizer_config.json` ' 'file to suppress this warning: ' , _A , ) __SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE = WavaVecaCTCTokenizer.from_pretrained(_A , **_A ) return cls(feature_extractor=_A , tokenizer=_A ) def __call__( self , *_A , **_A ): '''simple docstring''' if self._in_target_context_manager: return self.current_processor(*_A , **_A ) if "raw_speech" in kwargs: warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' ) __SCREAMING_SNAKE_CASE = kwargs.pop('raw_speech' ) else: __SCREAMING_SNAKE_CASE = kwargs.pop('audio' , _A ) __SCREAMING_SNAKE_CASE = kwargs.pop('sampling_rate' , _A ) __SCREAMING_SNAKE_CASE = kwargs.pop('text' , _A ) if len(_A ) > 0: __SCREAMING_SNAKE_CASE = args[0] __SCREAMING_SNAKE_CASE = args[1:] if audio is None and text is None: raise ValueError('You need to specify either an `audio` or `text` input to process.' ) if audio is not None: __SCREAMING_SNAKE_CASE = self.feature_extractor(_A , *_A , sampling_rate=_A , **_A ) if text is not None: __SCREAMING_SNAKE_CASE = self.tokenizer(_A , **_A ) if text is None: return inputs elif audio is None: return encodings else: __SCREAMING_SNAKE_CASE = encodings['input_ids'] return inputs def _A ( self , *_A , **_A ): '''simple docstring''' if self._in_target_context_manager: return self.current_processor.pad(*_A , **_A ) __SCREAMING_SNAKE_CASE = kwargs.pop('input_features' , _A ) __SCREAMING_SNAKE_CASE = kwargs.pop('labels' , _A ) if len(_A ) > 0: __SCREAMING_SNAKE_CASE = args[0] __SCREAMING_SNAKE_CASE = args[1:] if input_features is not None: __SCREAMING_SNAKE_CASE = self.feature_extractor.pad(_A , *_A , **_A ) if labels is not None: __SCREAMING_SNAKE_CASE = self.tokenizer.pad(_A , **_A ) if labels is None: return input_features elif input_features is None: return labels else: __SCREAMING_SNAKE_CASE = labels['input_ids'] return input_features def _A ( self , *_A , **_A ): '''simple docstring''' return self.tokenizer.batch_decode(*_A , **_A ) def _A ( self , *_A , **_A ): '''simple docstring''' return self.tokenizer.decode(*_A , **_A ) @contextmanager def _A ( self ): '''simple docstring''' warnings.warn( '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ' 'labels by using the argument `text` of the regular `__call__` method (either in the same call as ' 'your audio inputs, or in a separate call.' 
) __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = self.tokenizer yield __SCREAMING_SNAKE_CASE = self.feature_extractor __SCREAMING_SNAKE_CASE = False
118
0
import math


def decimal_to_octal(num: int) -> str:
    """Convert a decimal number to its octal representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main() -> None:
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")


if __name__ == "__main__":
    main()
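Two spot checks of the converter, hand-verified from the place-value expansion:

assert decimal_to_octal(65) == "0o101"  # 65 = 1*8**2 + 0*8**1 + 1*8**0
assert decimal_to_octal(512) == "0o1000"  # 512 = 8**3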
55
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
    "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
    "processing_wav2vec2": ["Wav2Vec2Processor"],
    "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wav2vec2"] = [
        "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Wav2Vec2ForAudioFrameClassification",
        "Wav2Vec2ForCTC",
        "Wav2Vec2ForMaskedLM",
        "Wav2Vec2ForPreTraining",
        "Wav2Vec2ForSequenceClassification",
        "Wav2Vec2ForXVector",
        "Wav2Vec2Model",
        "Wav2Vec2PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
        "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWav2Vec2ForCTC",
        "TFWav2Vec2Model",
        "TFWav2Vec2PreTrainedModel",
        "TFWav2Vec2ForSequenceClassification",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
        "FlaxWav2Vec2ForCTC",
        "FlaxWav2Vec2ForPreTraining",
        "FlaxWav2Vec2Model",
        "FlaxWav2Vec2PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
317
0
"""simple docstring""" def _lowerCAmelCase ( lowerCAmelCase ): '''simple docstring''' # bit count represents no. of bits in the gray code if bit_count < 0: raise ValueError("""The given input must be positive""" ) # get the generated string sequence UpperCAmelCase = gray_code_sequence_string(__lowerCamelCase ) # # convert them to integers for i in range(len(__lowerCamelCase ) ): UpperCAmelCase = int(sequence[i] , 2 ) return sequence def _lowerCAmelCase ( lowerCAmelCase ): '''simple docstring''' # The approach is a recursive one # Base case achieved when either n = 0 or n=1 if bit_count == 0: return ["0"] if bit_count == 1: return ["0", "1"] UpperCAmelCase = 1 << bit_count # defines the length of the sequence # 1<< n is equivalent to 2^n # recursive answer will generate answer for n-1 bits UpperCAmelCase = gray_code_sequence_string(bit_count - 1 ) UpperCAmelCase = [] # append 0 to first half of the smaller sequence generated for i in range(seq_len // 2 ): UpperCAmelCase = "0" + smaller_sequence[i] sequence.append(__lowerCamelCase ) # append 1 to second half ... start from the end of the list for i in reversed(range(seq_len // 2 ) ): UpperCAmelCase = "1" + smaller_sequence[i] sequence.append(__lowerCamelCase ) return sequence if __name__ == "__main__": import doctest doctest.testmod()
351
"""simple docstring""" import argparse import os import shutil from pathlib import Path import onnx import torch from packaging import version from torch.onnx import export from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline lowerCAmelCase_ : Tuple = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''') def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , ): '''simple docstring''' output_path.parent.mkdir(parents=lowerCAmelCase , exist_ok=lowerCAmelCase ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( lowerCAmelCase , lowerCAmelCase , f=output_path.as_posix() , input_names=lowerCAmelCase , output_names=lowerCAmelCase , dynamic_axes=lowerCAmelCase , do_constant_folding=lowerCAmelCase , use_external_data_format=lowerCAmelCase , enable_onnx_checker=lowerCAmelCase , opset_version=lowerCAmelCase , ) else: export( lowerCAmelCase , lowerCAmelCase , f=output_path.as_posix() , input_names=lowerCAmelCase , output_names=lowerCAmelCase , dynamic_axes=lowerCAmelCase , do_constant_folding=lowerCAmelCase , opset_version=lowerCAmelCase , ) @torch.no_grad() def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = False ): '''simple docstring''' UpperCAmelCase = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): UpperCAmelCase = """cuda""" elif fpaa and not torch.cuda.is_available(): raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" ) else: UpperCAmelCase = """cpu""" UpperCAmelCase = StableDiffusionPipeline.from_pretrained(lowerCAmelCase , torch_dtype=lowerCAmelCase ).to(lowerCAmelCase ) UpperCAmelCase = Path(lowerCAmelCase ) # TEXT ENCODER UpperCAmelCase = pipeline.text_encoder.config.max_position_embeddings UpperCAmelCase = pipeline.text_encoder.config.hidden_size UpperCAmelCase = pipeline.tokenizer( """A sample prompt""" , padding="""max_length""" , max_length=pipeline.tokenizer.model_max_length , truncation=lowerCAmelCase , return_tensors="""pt""" , ) onnx_export( pipeline.text_encoder , model_args=(text_input.input_ids.to(device=lowerCAmelCase , dtype=torch.intaa )) , output_path=output_path / """text_encoder""" / """model.onnx""" , ordered_input_names=["""input_ids"""] , output_names=["""last_hidden_state""", """pooler_output"""] , dynamic_axes={ """input_ids""": {0: """batch""", 1: """sequence"""}, } , opset=lowerCAmelCase , ) del pipeline.text_encoder # UNET UpperCAmelCase = pipeline.unet.config.in_channels UpperCAmelCase = pipeline.unet.config.sample_size UpperCAmelCase = output_path / """unet""" / """model.onnx""" onnx_export( pipeline.unet , model_args=( torch.randn(2 , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ).to(device=lowerCAmelCase , dtype=lowerCAmelCase ), torch.randn(2 ).to(device=lowerCAmelCase , dtype=lowerCAmelCase ), torch.randn(2 , lowerCAmelCase , lowerCAmelCase ).to(device=lowerCAmelCase , dtype=lowerCAmelCase ), False, ) , output_path=lowerCAmelCase , ordered_input_names=["""sample""", """timestep""", """encoder_hidden_states""", """return_dict"""] , output_names=["""out_sample"""] , dynamic_axes={ """sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""}, """timestep""": {0: """batch"""}, """encoder_hidden_states""": {0: 
"""batch""", 1: """sequence"""}, } , opset=lowerCAmelCase , use_external_data_format=lowerCAmelCase , ) UpperCAmelCase = str(unet_path.absolute().as_posix() ) UpperCAmelCase = os.path.dirname(lowerCAmelCase ) UpperCAmelCase = onnx.load(lowerCAmelCase ) # clean up existing tensor files shutil.rmtree(lowerCAmelCase ) os.mkdir(lowerCAmelCase ) # collate external tensor files into one onnx.save_model( lowerCAmelCase , lowerCAmelCase , save_as_external_data=lowerCAmelCase , all_tensors_to_one_file=lowerCAmelCase , location="""weights.pb""" , convert_attribute=lowerCAmelCase , ) del pipeline.unet # VAE ENCODER UpperCAmelCase = pipeline.vae UpperCAmelCase = vae_encoder.config.in_channels UpperCAmelCase = vae_encoder.config.sample_size # need to get the raw tensor output (sample) from the encoder UpperCAmelCase = lambda lowerCAmelCase , lowerCAmelCase : vae_encoder.encode(lowerCAmelCase , lowerCAmelCase )[0].sample() onnx_export( lowerCAmelCase , model_args=( torch.randn(1 , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ).to(device=lowerCAmelCase , dtype=lowerCAmelCase ), False, ) , output_path=output_path / """vae_encoder""" / """model.onnx""" , ordered_input_names=["""sample""", """return_dict"""] , output_names=["""latent_sample"""] , dynamic_axes={ """sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""}, } , opset=lowerCAmelCase , ) # VAE DECODER UpperCAmelCase = pipeline.vae UpperCAmelCase = vae_decoder.config.latent_channels UpperCAmelCase = vae_decoder.config.out_channels # forward only through the decoder part UpperCAmelCase = vae_encoder.decode onnx_export( lowerCAmelCase , model_args=( torch.randn(1 , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ).to(device=lowerCAmelCase , dtype=lowerCAmelCase ), False, ) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={ """latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""}, } , opset=lowerCAmelCase , ) del pipeline.vae # SAFETY CHECKER if pipeline.safety_checker is not None: UpperCAmelCase = pipeline.safety_checker UpperCAmelCase = safety_checker.config.vision_config.num_channels UpperCAmelCase = safety_checker.config.vision_config.image_size UpperCAmelCase = safety_checker.forward_onnx onnx_export( pipeline.safety_checker , model_args=( torch.randn( 1 , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ).to(device=lowerCAmelCase , dtype=lowerCAmelCase ), torch.randn(1 , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ).to(device=lowerCAmelCase , dtype=lowerCAmelCase ), ) , output_path=output_path / """safety_checker""" / """model.onnx""" , ordered_input_names=["""clip_input""", """images"""] , output_names=["""out_images""", """has_nsfw_concepts"""] , dynamic_axes={ """clip_input""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""}, """images""": {0: """batch""", 1: """height""", 2: """width""", 3: """channels"""}, } , opset=lowerCAmelCase , ) del pipeline.safety_checker UpperCAmelCase = OnnxRuntimeModel.from_pretrained(output_path / """safety_checker""" ) UpperCAmelCase = pipeline.feature_extractor else: UpperCAmelCase = None UpperCAmelCase = None UpperCAmelCase = OnnxStableDiffusionPipeline( vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / """vae_encoder""" ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / """vae_decoder""" ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / """text_encoder""" ) , 
tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / """unet""" ) , scheduler=pipeline.scheduler , safety_checker=lowerCAmelCase , feature_extractor=lowerCAmelCase , requires_safety_checker=safety_checker is not None , ) onnx_pipeline.save_pretrained(lowerCAmelCase ) print("""ONNX pipeline saved to""" , lowerCAmelCase ) del pipeline del onnx_pipeline UpperCAmelCase = OnnxStableDiffusionPipeline.from_pretrained(lowerCAmelCase , provider="""CPUExecutionProvider""" ) print("""ONNX pipeline is loadable""" ) if __name__ == "__main__": lowerCAmelCase_ : Tuple = argparse.ArgumentParser() parser.add_argument( '''--model_path''', type=str, required=True, help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''', ) parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''') parser.add_argument( '''--opset''', default=1_4, type=int, help='''The version of the ONNX operator set to use.''', ) parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''') lowerCAmelCase_ : Union[str, Any] = parser.parse_args() convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
248
0
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient phi(n) for 2 <= n <= limit, computed with a prime sieve."""
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
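A small hand-computed check of the sieve above: the totients of 2 through 8 are 1, 2, 2, 4, 2, 6, 4.

assert solution(8) == 1 + 2 + 2 + 4 + 2 + 6 + 4  # == 21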
215
"""simple docstring""" from math import ceil def A ( snake_case__ , snake_case__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = list(range(0 , snake_case__ ) ) SCREAMING_SNAKE_CASE__ = [item for sublist in list(device_map.values() ) for item in sublist] # Duplicate check SCREAMING_SNAKE_CASE__ = [] for i in device_map_blocks: if device_map_blocks.count(snake_case__ ) > 1 and i not in duplicate_blocks: duplicate_blocks.append(snake_case__ ) # Missing blocks SCREAMING_SNAKE_CASE__ = [i for i in blocks if i not in device_map_blocks] SCREAMING_SNAKE_CASE__ = [i for i in device_map_blocks if i not in blocks] if len(snake_case__ ) != 0: raise ValueError( """Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.""" """ These attention blocks were specified more than once: """ + str(snake_case__ ) ) if len(snake_case__ ) != 0: raise ValueError( """There are attention blocks for this model that are not specified in the device_map. Add these attention """ """blocks to a device on the device_map: """ + str(snake_case__ ) ) if len(snake_case__ ) != 0: raise ValueError( """The device_map contains more attention blocks than this model has. Remove these from the device_map:""" + str(snake_case__ ) ) def A ( snake_case__ , snake_case__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = list(range(snake_case__ ) ) SCREAMING_SNAKE_CASE__ = int(ceil(n_layers / len(snake_case__ ) ) ) SCREAMING_SNAKE_CASE__ = [layers[i : i + n_blocks] for i in range(0 , snake_case__ , snake_case__ )] return dict(zip(snake_case__ , snake_case__ ) )
165
0
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid: 1 / (1 + exp(-x))."""
    return 1 / (1 + np.exp(-vector))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
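For example, the standard logistic outputs at -1, 0 and 2:

print(sigmoid(np.array([-1.0, 0.0, 2.0])))  # [0.26894142 0.5 0.88079708]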
359
import unittest from transformers import GPTNeoXJapaneseConfig, is_torch_available from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel class a : def __init__( self :str ,__lowercase :Optional[Any] ,__lowercase :List[Any]=1_3 ,__lowercase :str=7 ,__lowercase :Dict=True ,__lowercase :Any=True ,__lowercase :str=True ,__lowercase :Any=True ,__lowercase :Tuple=9_9 ,__lowercase :List[str]=3_2 ,__lowercase :int=5 ,__lowercase :Union[str, Any]=4 ,__lowercase :List[str]=4 ,__lowercase :Any="gelu" ,__lowercase :Any=0.0 ,__lowercase :Tuple=0.1 ,__lowercase :str=True ,__lowercase :Tuple=5_1_2 ,__lowercase :Dict=1_6 ,__lowercase :Tuple=2 ,__lowercase :List[str]=0.02 ,__lowercase :Dict=3 ,__lowercase :Optional[int]=4 ,__lowercase :Tuple=None ,): snake_case__ : Optional[int] = parent snake_case__ : Optional[Any] = batch_size snake_case__ : Optional[Any] = seq_length snake_case__ : Tuple = is_training snake_case__ : Optional[Any] = use_input_mask snake_case__ : List[Any] = use_token_type_ids snake_case__ : str = use_labels snake_case__ : List[Any] = vocab_size snake_case__ : Optional[int] = hidden_size snake_case__ : List[Any] = num_hidden_layers snake_case__ : str = num_attention_heads snake_case__ : int = intermediate_multiple_size snake_case__ : Tuple = hidden_act snake_case__ : Optional[Any] = hidden_dropout snake_case__ : str = attention_dropout snake_case__ : List[str] = weight_tying snake_case__ : Optional[Any] = max_position_embeddings snake_case__ : Optional[int] = type_vocab_size snake_case__ : str = type_sequence_label_size snake_case__ : Dict = initializer_range snake_case__ : int = num_labels snake_case__ : int = num_choices snake_case__ : int = scope def __lowerCamelCase ( self :List[str] ): snake_case__ : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) snake_case__ : str = None if self.use_input_mask: snake_case__ : int = random_attention_mask([self.batch_size, self.seq_length] ) snake_case__ : Union[str, Any] = None if self.use_labels: snake_case__ : int = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) snake_case__ : Optional[Any] = self.get_config() return config, input_ids, input_mask, token_labels def __lowerCamelCase ( self :int ): return GPTNeoXJapaneseConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_multiple_size=self.intermediate_multiple_size ,hidden_act=self.hidden_act ,hidden_dropout=self.hidden_dropout ,attention_dropout=self.attention_dropout ,weight_tying=self.weight_tying ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=__lowercase ,initializer_range=self.initializer_range ,) def __lowerCamelCase ( self :str ): snake_case__ , snake_case__ , snake_case__ , snake_case__ : str = self.prepare_config_and_inputs() snake_case__ : Union[str, Any] = True return config, input_ids, input_mask, token_labels def __lowerCamelCase ( self :List[Any] ,__lowercase :Any ,__lowercase :Dict ,__lowercase :Optional[Any] ): snake_case__ : Union[str, Any] = 
GPTNeoXJapaneseModel(config=__lowercase ) model.to(__lowercase ) model.eval() snake_case__ : Union[str, Any] = model(__lowercase ,attention_mask=__lowercase ) snake_case__ : Optional[Any] = model(__lowercase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCamelCase ( self :Any ,__lowercase :Union[str, Any] ,__lowercase :Tuple ,__lowercase :Union[str, Any] ): snake_case__ : Any = True snake_case__ : Tuple = GPTNeoXJapaneseModel(__lowercase ) model.to(__lowercase ) model.eval() snake_case__ : str = model(__lowercase ,attention_mask=__lowercase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCamelCase ( self :Any ,__lowercase :List[Any] ,__lowercase :List[Any] ,__lowercase :Optional[Any] ,__lowercase :Any ): snake_case__ : Any = GPTNeoXJapaneseForCausalLM(config=__lowercase ) model.to(__lowercase ) model.eval() snake_case__ : Any = model(__lowercase ,attention_mask=__lowercase ,labels=__lowercase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCamelCase ( self :Optional[int] ,__lowercase :Any ,__lowercase :int ,__lowercase :List[str] ): snake_case__ : Optional[int] = True snake_case__ : Optional[int] = GPTNeoXJapaneseForCausalLM(config=__lowercase ) model.to(__lowercase ) model.eval() # first forward pass snake_case__ : List[Any] = model(__lowercase ,attention_mask=__lowercase ,use_cache=__lowercase ) snake_case__ : Dict = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids snake_case__ : Optional[Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size ) snake_case__ : int = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and snake_case__ : Optional[int] = torch.cat([input_ids, next_tokens] ,dim=-1 ) snake_case__ : Optional[int] = torch.cat([input_mask, next_mask] ,dim=-1 ) snake_case__ : Dict = model(__lowercase ,attention_mask=__lowercase ,output_hidden_states=__lowercase ) snake_case__ : Tuple = output_from_no_past['''hidden_states'''][0] snake_case__ : List[str] = model( __lowercase ,attention_mask=__lowercase ,past_key_values=__lowercase ,output_hidden_states=__lowercase ,)['''hidden_states'''][0] # select random slice snake_case__ : Any = ids_tensor((1,) ,output_from_past.shape[-1] ).item() snake_case__ : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach() snake_case__ : str = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__lowercase ,__lowercase ,atol=1e-3 ) ) def __lowerCamelCase ( self :Dict ): snake_case__ : List[Any] = self.prepare_config_and_inputs() snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[Any] = config_and_inputs snake_case__ : List[str] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class a ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): __lowerCAmelCase : Union[str, Any] = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else () __lowerCAmelCase : List[str] = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else () __lowerCAmelCase : int = ( {"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM} if is_torch_available() else {} ) __lowerCAmelCase : 
List[Any] = False __lowerCAmelCase : Tuple = False __lowerCAmelCase : Tuple = False __lowerCAmelCase : str = False def __lowerCamelCase ( self :Any ): snake_case__ : int = GPTNeoXJapaneseModelTester(self ) snake_case__ : Any = ConfigTester(self ,config_class=__lowercase ,hidden_size=3_7 ) def __lowerCamelCase ( self :Any ): self.config_tester.run_common_tests() def __lowerCamelCase ( self :str ): snake_case__ , snake_case__ , snake_case__ , snake_case__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(__lowercase ,__lowercase ,__lowercase ) def __lowerCamelCase ( self :Optional[Any] ): snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(__lowercase ,__lowercase ,__lowercase ) def __lowerCamelCase ( self :Optional[Any] ): # This regression test was failing with PyTorch < 1.3 snake_case__ , snake_case__ , snake_case__ , snake_case__ : int = self.model_tester.prepare_config_and_inputs_for_decoder() snake_case__ : List[str] = None self.model_tester.create_and_check_model_as_decoder(__lowercase ,__lowercase ,__lowercase ) def __lowerCamelCase ( self :Optional[int] ): snake_case__ , snake_case__ , snake_case__ , snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(__lowercase ,__lowercase ,__lowercase ) def __lowerCamelCase ( self :str ): snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*__lowercase ) @slow def __lowerCamelCase ( self :Dict ): snake_case__ : str = '''abeja/gpt-neox-japanese-2.7b''' snake_case__ : int = ['''データサイエンティストとは、''', '''100年後に必要とされる会社は、''', '''フルリモートの環境で働くために必要なことは、''', '''国境の長いトンネルを抜けると''', '''美味しい日本食といえば、'''] snake_case__ : Optional[int] = [ '''データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。''', '''100年後に必要とされる会社は、「人」が中心の会社です。''', '''フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。''', '''国境の長いトンネルを抜けると、そこは雪国だった。''', '''美味しい日本食といえば、やっぱりお寿司ですよね。''', ] snake_case__ : Optional[int] = GPTNeoXJapaneseTokenizer.from_pretrained(__lowercase ) snake_case__ : Union[str, Any] = GPTNeoXJapaneseForCausalLM.from_pretrained(__lowercase ) snake_case__ : Optional[int] = [] for prompt in prompts: snake_case__ : Dict = tokenizer(__lowercase ,return_tensors='''pt''' ).input_ids snake_case__ : Union[str, Any] = model.generate(__lowercase ,max_length=5_0 ) snake_case__ : int = tokenizer.batch_decode(__lowercase ,skip_special_tokens=__lowercase ) predicted_outputs += generated_string self.assertListEqual(__lowercase ,__lowercase )
44
0
snake_case : Optional[int] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" def lowerCAmelCase_ ( _snake_case : bytes ) -> bytes: '''simple docstring''' if not isinstance(_snake_case , _snake_case ): __magic_name__ : Tuple = F'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(_snake_case ) __magic_name__ : Optional[int] = "".join(bin(_snake_case )[2:].zfill(8 ) for byte in data ) __magic_name__ : List[Any] = len(_snake_case ) % 6 != 0 if padding_needed: # The padding that will be added later __magic_name__ : List[str] = B"=" * ((6 - len(_snake_case ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(_snake_case ) % 6) else: __magic_name__ : List[str] = B"" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(_snake_case ) , 6 ) ).encode() + padding ) def lowerCAmelCase_ ( _snake_case : str ) -> bytes: '''simple docstring''' if not isinstance(_snake_case , _snake_case ) and not isinstance(_snake_case , _snake_case ): __magic_name__ : List[str] = ( "argument should be a bytes-like object or ASCII string, " F'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(_snake_case ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(_snake_case , _snake_case ): try: __magic_name__ : List[Any] = encoded_data.decode("utf-8" ) except UnicodeDecodeError: raise ValueError("base64 encoded data should only contain ASCII characters" ) __magic_name__ : List[str] = encoded_data.count("=" ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(_snake_case ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one __magic_name__ : Optional[int] = encoded_data[:-padding] __magic_name__ : Dict = "".join( bin(B64_CHARSET.index(_snake_case ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: __magic_name__ : Union[str, Any] = "".join( bin(B64_CHARSET.index(_snake_case ) )[2:].zfill(6 ) for char in encoded_data ) __magic_name__ : List[Any] = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(_snake_case ) , 8 ) ] return bytes(_snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
281
import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from .utils import require_lza, require_zstandard def lowerCAmelCase_ ( _snake_case : List[Any] ) -> List[Any]: '''simple docstring''' assert "mock" in _fsspec_registry assert "bz2" in _fsspec_registry def lowerCAmelCase_ ( ) -> Tuple: '''simple docstring''' assert "mock" not in _fsspec_registry assert "bz2" in _fsspec_registry def lowerCAmelCase_ ( ) -> Union[str, Any]: '''simple docstring''' __magic_name__ : Dict = "mock-s3-bucket" __magic_name__ : Any = F'''s3://{mock_bucket}''' __magic_name__ : str = extract_path_from_uri(_snake_case ) assert dataset_path.startswith("s3://" ) is False __magic_name__ : Tuple = "./local/path" __magic_name__ : Optional[Any] = extract_path_from_uri(_snake_case ) assert dataset_path == new_dataset_path def lowerCAmelCase_ ( _snake_case : List[str] ) -> Optional[Any]: '''simple docstring''' __magic_name__ : str = is_remote_filesystem(_snake_case ) assert is_remote is True __magic_name__ : Optional[int] = fsspec.filesystem("file" ) __magic_name__ : int = is_remote_filesystem(_snake_case ) assert is_remote is False @pytest.mark.parametrize("compression_fs_class" , _snake_case ) def lowerCAmelCase_ ( _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : int , _snake_case : Tuple , _snake_case : Any , _snake_case : Union[str, Any] , _snake_case : Any ) -> int: '''simple docstring''' __magic_name__ : Any = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file} __magic_name__ : str = input_paths[compression_fs_class.protocol] if input_path is None: __magic_name__ : Dict = F'''for \'{compression_fs_class.protocol}\' compression protocol, ''' if compression_fs_class.protocol == "lz4": reason += require_lza.kwargs["reason"] elif compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(_snake_case ) __magic_name__ : str = fsspec.filesystem(compression_fs_class.protocol , fo=_snake_case ) assert isinstance(_snake_case , _snake_case ) __magic_name__ : int = os.path.basename(_snake_case ) __magic_name__ : Optional[int] = expected_filename[: expected_filename.rindex("." 
)] assert fs.glob("*" ) == [expected_filename] with fs.open(_snake_case , "r" , encoding="utf-8" ) as f, open(_snake_case , encoding="utf-8" ) as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize("protocol" , ["zip", "gzip"] ) def lowerCAmelCase_ ( _snake_case : List[Any] , _snake_case : Optional[Any] , _snake_case : Optional[Any] ) -> str: '''simple docstring''' __magic_name__ : int = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path} __magic_name__ : int = compressed_file_paths[protocol] __magic_name__ : Tuple = "dataset.jsonl" __magic_name__ : List[str] = F'''{protocol}://{member_file_path}::{compressed_file_path}''' __magic_name__ , *__magic_name__ : Optional[Any] = fsspec.get_fs_token_paths(_snake_case ) assert fs.isfile(_snake_case ) assert not fs.isfile("non_existing_" + member_file_path ) @pytest.mark.integration def lowerCAmelCase_ ( _snake_case : Union[str, Any] , _snake_case : Dict , _snake_case : List[str] , _snake_case : Tuple ) -> str: '''simple docstring''' __magic_name__ : int = hf_api.dataset_info(_snake_case , token=_snake_case ) __magic_name__ : Optional[Any] = HfFileSystem(repo_info=_snake_case , token=_snake_case ) assert sorted(hffs.glob("*" ) ) == [".gitattributes", "data"] assert hffs.isdir("data" ) assert hffs.isfile(".gitattributes" ) and hffs.isfile("data/text_data.txt" ) with open(_snake_case ) as f: assert hffs.open("data/text_data.txt" , "r" ).read() == f.read() def lowerCAmelCase_ ( ) -> Optional[int]: '''simple docstring''' __magic_name__ : Optional[Any] = "bz2" # Import module import datasets.filesystems # Overwrite protocol and reload register_implementation(_snake_case , _snake_case , clobber=_snake_case ) with pytest.warns(_snake_case ) as warning_info: importlib.reload(datasets.filesystems ) assert len(_snake_case ) == 1 assert ( str(warning_info[0].message ) == F'''A filesystem protocol was already set for {protocol} and will be overwritten.''' )
281
1
from timeit import timeit

test_data = {
    "MALAYALAM": True,
    "String": False,
    "rotor": True,
    "level": True,
    "A": True,
    "BB": True,
    "ABC": False,
    "amanaplanacanalpanama": True,  # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())


def is_palindrome(s: str) -> bool:
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 2:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500_000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")


if __name__ == "__main__":
    for key, value in test_data.items():
        assert is_palindrome(key) is is_palindrome_recursive(key)
        assert is_palindrome(key) is is_palindrome_slice(key)
        print(f"{key:21} {value}")
    print("a man a plan a canal panama")

    # finished 500,000 runs in 0.46793 seconds
    benchmark_function("is_palindrome_slice")
    # finished 500,000 runs in 0.85234 seconds
    benchmark_function("is_palindrome")
    # finished 500,000 runs in 1.32028 seconds
    benchmark_function("is_palindrome_recursive")
    # finished 500,000 runs in 2.08679 seconds
    benchmark_function("is_palindrome_traversal")
362
import argparse

import torch

from transformers import (
    UniSpeechSatConfig,
    UniSpeechSatForAudioFrameClassification,
    UniSpeechSatForSequenceClassification,
    UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
258
0
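A self-contained sketch of the timeit pattern used in the palindrome row above; the function name and sample string here are illustrative, and the run count is reduced so the demo finishes quickly.

from timeit import timeit


def is_palindrome_slice(s: str) -> bool:
    # Slicing builds the reversed string in C, which is why it is the
    # fastest variant in the benchmark figures quoted above.
    return s == s[::-1]


if __name__ == "__main__":
    elapsed = timeit(
        stmt="is_palindrome_slice('amanaplanacanalpanama')",
        setup="from __main__ import is_palindrome_slice",
        number=100_000,
    )
    print(f"is_palindrome_slice finished 100,000 runs in {elapsed:.5f} seconds")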
"""simple docstring""" def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ): '''simple docstring''' return x if y == 0 else greatest_common_divisor(SCREAMING_SNAKE_CASE , x % y ) def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ): '''simple docstring''' return (x * y) // greatest_common_divisor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def a__ ( SCREAMING_SNAKE_CASE : int = 2_0 ): '''simple docstring''' lowerCAmelCase : int = 1 for i in range(1 , n + 1 ): lowerCAmelCase : List[str] = lcm(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return g if __name__ == "__main__": print(F"{solution() = }")
108
def cocktail_shaker_sort(unsorted: list) -> list:
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
283
0
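As a worked check of the gcd/lcm row above: the smallest number evenly divisible by every integer from 1 to 10 is 2520, which the lcm fold reproduces. This sketch uses the standard library's math.gcd rather than the row's recursive helper.

from math import gcd


def lcm(x: int, y: int) -> int:
    # lcm(x, y) * gcd(x, y) == x * y for positive integers
    return (x * y) // gcd(x, y)


g = 1
for i in range(1, 11):
    g = lcm(g, i)
print(g)  # 2520, the smallest positive number divisible by 1..10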
import argparse
import logging
import pickle
from collections import Counter


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30_522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
368
def hexagonal_numbers(length: int) -> list[int]:
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
167
0
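A toy version of the token-count dump above, assuming nothing beyond the standard library: count token ids over a tiny synthetic corpus, then scatter the counts into a dense list indexed by token id.

from collections import Counter

vocab_size = 10
data = [[1, 2, 2, 5], [2, 5, 5, 9]]  # two "sequences" of token ids

counter = Counter()
for tk_ids in data:
    counter.update(tk_ids)

# Dense per-id counts, as the script above dumps for a real vocabulary.
counts = [0] * vocab_size
for token_id, count in counter.items():
    counts[token_id] = count

print(counts)  # [0, 1, 3, 0, 0, 3, 0, 0, 0, 1]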
from typing import Optional

from torch import nn

from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
297
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
297
1
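A hand evaluation of the Friedmann expression from the Hubble-parameter row, for a flat universe at redshift 0.5; the density values are the same illustrative ones used in the row's own demo.

# H(z) = H0 * sqrt(Or*(1+z)^4 + Om*(1+z)^3 + Ok*(1+z)^2 + OL)
h0 = 68.3  # km/s/Mpc
omega_r = 1e-4
omega_m = 0.3
omega_l = 1 - omega_m - omega_r  # flat universe, so the curvature term vanishes

z = 0.5
curvature = 1 - (omega_m + omega_r + omega_l)  # 0 here
e2 = omega_r * (1 + z) ** 4 + omega_m * (1 + z) ** 3 + curvature * (1 + z) ** 2 + omega_l
print(h0 * e2 ** 0.5)  # ~89.4 km/s/Mpc at z = 0.5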
from __future__ import annotations


def binary_search(a_list: list[int], item: int) -> bool:
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
177
import numpy as np


def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    # tanh(x) written in its logistic form: 2 / (1 + e^(-2x)) - 1
    return (2 / (1 + np.exp(-2 * vector))) - 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
177
1
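The tanh row uses the logistic form of the hyperbolic tangent; a quick identity check against NumPy's built-in confirms the algebra, tanh(x) = 2/(1 + e^(-2x)) - 1.

import numpy as np

x = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
custom = (2 / (1 + np.exp(-2 * x))) - 1
assert np.allclose(custom, np.tanh(x))  # identical up to floating-point error
print(custom)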
import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self : str , a : List[str] , a : str=7 , a : int=3 , a : Dict=18 , a : Optional[Any]=30 , a : int=400 , a : str=True , a : Dict=None , a : str=True , ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : Any = size if size is not None else {"height": 18, "width": 18} SCREAMING_SNAKE_CASE : Optional[int] = parent SCREAMING_SNAKE_CASE : Optional[int] = batch_size SCREAMING_SNAKE_CASE : Tuple = num_channels SCREAMING_SNAKE_CASE : Any = image_size SCREAMING_SNAKE_CASE : List[Any] = min_resolution SCREAMING_SNAKE_CASE : str = max_resolution SCREAMING_SNAKE_CASE : List[Any] = do_resize SCREAMING_SNAKE_CASE : Optional[Any] = size SCREAMING_SNAKE_CASE : Dict = do_normalize def __UpperCamelCase ( self : Tuple ) -> Optional[Any]: """simple docstring""" return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.8866_4436_3403_3203, 0.6618_8293_6954_4983, 0.3891_7464_0178_6804], [-0.6042_5591_4688_1104, -0.0_2295_0088_6052_8469, 0.5423_7973_6900_3296], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class _UpperCamelCase ( __A , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =ImageGPTImageProcessor if is_vision_available() else None def __UpperCamelCase ( self : Dict ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = ImageGPTImageProcessingTester(self ) @property def __UpperCamelCase ( self : str ) -> Any: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a , "clusters" ) ) self.assertTrue(hasattr(a , "do_resize" ) ) self.assertTrue(hasattr(a , "size" ) ) self.assertTrue(hasattr(a , "do_normalize" ) ) def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : int = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"height": 18, "width": 18} ) SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"height": 42, "width": 42} ) def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict ) SCREAMING_SNAKE_CASE : Any = json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(a , obj[key] ) ) else: self.assertEqual(obj[key] , a ) def __UpperCamelCase ( self : Optional[int] ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE : int = os.path.join(a , 
"image_processor.json" ) image_processor_first.to_json_file(a ) SCREAMING_SNAKE_CASE : Any = self.image_processing_class.from_json_file(a ).to_dict() SCREAMING_SNAKE_CASE : Optional[Any] = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(a , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , a ) def __UpperCamelCase ( self : str ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(a ) SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class.from_pretrained(a ).to_dict() SCREAMING_SNAKE_CASE : Optional[Any] = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(a , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , a ) @unittest.skip("ImageGPT requires clusters at initialization" ) def __UpperCamelCase ( self : Dict ) -> Any: """simple docstring""" pass def lowerCamelCase__ ( ): SCREAMING_SNAKE_CASE : List[Any] = load_dataset("hf-internal-testing/fixtures_image_utils" , split="test") SCREAMING_SNAKE_CASE : Dict = Image.open(dataset[4]["file"]) SCREAMING_SNAKE_CASE : List[Any] = Image.open(dataset[5]["file"]) SCREAMING_SNAKE_CASE : List[Any] = [imagea, imagea] return images @require_vision @require_torch class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' @slow def __UpperCamelCase ( self : Tuple ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small" ) SCREAMING_SNAKE_CASE : Dict = prepare_images() # test non-batched SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(images[0] , return_tensors="pt" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (1, 1024) ) SCREAMING_SNAKE_CASE : Optional[Any] = [306, 191, 191] self.assertEqual(encoding.input_ids[0, :3].tolist() , a ) # test batched SCREAMING_SNAKE_CASE : Dict = image_processing(a , return_tensors="pt" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (2, 1024) ) SCREAMING_SNAKE_CASE : Any = [303, 13, 13] self.assertEqual(encoding.input_ids[1, -3:].tolist() , a )
76
"""simple docstring""" import os import sys from contextlib import contextmanager # Windows only if os.name == "nt": import ctypes import msvcrt # noqa class lowerCamelCase__ ( ctypes.Structure ): """simple docstring""" __a = [("""size""", ctypes.c_int), ("""visible""", ctypes.c_byte)] def lowerCamelCase ( ) -> Optional[int]: '''simple docstring''' if os.name == "nt": __UpperCAmelCase : Dict = CursorInfo() __UpperCAmelCase : Any = ctypes.windll.kernelaa.GetStdHandle(-1_1 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(_UpperCamelCase , ctypes.byref(_UpperCamelCase ) ) __UpperCAmelCase : Tuple = False ctypes.windll.kernelaa.SetConsoleCursorInfo(_UpperCamelCase , ctypes.byref(_UpperCamelCase ) ) elif os.name == "posix": sys.stdout.write("""\033[?25l""" ) sys.stdout.flush() def lowerCamelCase ( ) -> Optional[int]: '''simple docstring''' if os.name == "nt": __UpperCAmelCase : str = CursorInfo() __UpperCAmelCase : int = ctypes.windll.kernelaa.GetStdHandle(-1_1 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(_UpperCamelCase , ctypes.byref(_UpperCamelCase ) ) __UpperCAmelCase : Union[str, Any] = True ctypes.windll.kernelaa.SetConsoleCursorInfo(_UpperCamelCase , ctypes.byref(_UpperCamelCase ) ) elif os.name == "posix": sys.stdout.write("""\033[?25h""" ) sys.stdout.flush() @contextmanager def lowerCamelCase ( ) -> str: '''simple docstring''' try: hide_cursor() yield finally: show_cursor()
115
0
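A POSIX-only sketch of the escape sequences behind the cursor row above: the DECTCEM private-mode codes "\033[?25l" and "\033[?25h" hide and restore the terminal cursor. On Windows the row goes through the kernel32 console API instead.

import sys
import time

sys.stdout.write("\033[?25l")  # hide the cursor
sys.stdout.flush()
time.sleep(1)  # pretend to do work while the cursor is hidden
sys.stdout.write("\033[?25h")  # show it again
sys.stdout.flush()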
import logging

from transformers.configuration_utils import PretrainedConfig


logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    # A BertConfig replica with extra parameters for pruning/masking.
    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
229
from __future__ import annotations


class IIRFilter:
    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )

        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]

        self.input_history[0] = sample
        self.output_history[0] = result

        return result
229
1
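A numeric check of the direct-form difference equation that process in the IIR row implements. With a_coeffs = [1.0, -0.5] and b_coeffs = [0.5, 0.0], a first-order low-pass y[n] = 0.5*x[n] + 0.5*y[n-1], a unit step settles toward 1.0.

a_coeffs = [1.0, -0.5]
b_coeffs = [0.5, 0.0]
x_hist, y_hist = [0.0], [0.0]  # one sample of history for a first-order filter


def process(sample: float) -> float:
    # result = sum(b[i]*x[n-i]) - sum(a[i]*y[n-i]) for i >= 1, then scale by a[0]
    result = b_coeffs[1] * x_hist[0] - a_coeffs[1] * y_hist[0]
    result = (result + b_coeffs[0] * sample) / a_coeffs[0]
    x_hist[0], y_hist[0] = sample, result
    return result


print([round(process(1.0), 4) for _ in range(6)])
# [0.5, 0.75, 0.875, 0.9375, 0.9688, 0.9844]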
"""simple docstring""" from sklearn.metrics import recall_score import datasets lowercase__ : Dict = """ Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation: Recall = TP / (TP + FN) Where TP is the true positives and FN is the false negatives. """ lowercase__ : int = """ Args: - **predictions** (`list` of `int`): The predicted labels. - **references** (`list` of `int`): The ground truth labels. - **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None. - **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`. - **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary. - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives. - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall. - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). - **sample_weight** (`list` of `float`): Sample weights Defaults to `None`. - **zero_division** (): Sets the value to return when there is a zero division. Defaults to . - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised. - `0`: If there is a zero division, the return value is `0`. - `1`: If there is a zero division, the return value is `1`. Returns: - **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better. Examples: Example 1-A simple example with some errors >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1]) >>> print(results) {'recall': 0.6666666666666666} Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`. >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0) >>> print(results) {'recall': 0.5} Example 3-The same example as Example 1, but with `sample_weight` included. 
>>> recall_metric = datasets.load_metric('recall') >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8] >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight) >>> print(results) {'recall': 0.55} Example 4-A multiclass example, using different averages. >>> recall_metric = datasets.load_metric('recall') >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'recall': array([1., 0., 0.])} """ lowercase__ : Dict = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class UpperCamelCase__ ( datasets.Metric ): """simple docstring""" def SCREAMING_SNAKE_CASE__ ( self : Dict ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('int32' ) ), 'references': datasets.Sequence(datasets.Value('int32' ) ), } if self.config_name == 'multilabel' else { 'predictions': datasets.Value('int32' ), 'references': datasets.Value('int32' ), } ) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'] , ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , SCREAMING_SNAKE_CASE_ : Tuple=1 , SCREAMING_SNAKE_CASE_ : Optional[int]="binary" , SCREAMING_SNAKE_CASE_ : Optional[int]=None , SCREAMING_SNAKE_CASE_ : Dict="warn" , ): lowerCAmelCase_ : Union[str, Any] = recall_score( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , pos_label=SCREAMING_SNAKE_CASE_ , average=SCREAMING_SNAKE_CASE_ , sample_weight=SCREAMING_SNAKE_CASE_ , zero_division=SCREAMING_SNAKE_CASE_ , ) return {"recall": float(SCREAMING_SNAKE_CASE_ ) if score.size == 1 else score}
224
"""simple docstring""" from ..utils import DummyObject, requires_backends class UpperCamelCase__ ( metaclass=lowercase_ ): """simple docstring""" _SCREAMING_SNAKE_CASE = ["""flax"""] def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : Any ): requires_backends(self , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Any , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : List[str] ): requires_backends(cls , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : int , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : Optional[int] ): requires_backends(cls , ['flax'] ) class UpperCamelCase__ ( metaclass=lowercase_ ): """simple docstring""" _SCREAMING_SNAKE_CASE = ["""flax"""] def __init__( self : Tuple , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Any ): requires_backends(self , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : Dict ): requires_backends(cls , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Dict , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Optional[Any] ): requires_backends(cls , ['flax'] ) class UpperCamelCase__ ( metaclass=lowercase_ ): """simple docstring""" _SCREAMING_SNAKE_CASE = ["""flax"""] def __init__( self : int , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : List[str] ): requires_backends(self , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : List[Any] , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : List[Any] ): requires_backends(cls , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : List[str] ): requires_backends(cls , ['flax'] ) class UpperCamelCase__ ( metaclass=lowercase_ ): """simple docstring""" _SCREAMING_SNAKE_CASE = ["""flax"""] def __init__( self : Tuple , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : int ): requires_backends(self , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Tuple , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : str ): requires_backends(cls , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : List[Any] , *SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : int ): requires_backends(cls , ['flax'] ) class UpperCamelCase__ ( metaclass=lowercase_ ): """simple docstring""" _SCREAMING_SNAKE_CASE = ["""flax"""] def __init__( self : List[str] , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : int ): requires_backends(self , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : List[str] ): requires_backends(cls , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : List[str] , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : Dict ): requires_backends(cls , ['flax'] ) class UpperCamelCase__ ( metaclass=lowercase_ ): """simple docstring""" _SCREAMING_SNAKE_CASE = ["""flax"""] def __init__( self : Any , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Dict ): requires_backends(self , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : str , *SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Any ): requires_backends(cls , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Tuple , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ): requires_backends(cls , ['flax'] ) class 
UpperCamelCase__ ( metaclass=lowercase_ ): """simple docstring""" _SCREAMING_SNAKE_CASE = ["""flax"""] def __init__( self : Tuple , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : Optional[Any] ): requires_backends(self , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Dict ): requires_backends(cls , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ): requires_backends(cls , ['flax'] ) class UpperCamelCase__ ( metaclass=lowercase_ ): """simple docstring""" _SCREAMING_SNAKE_CASE = ["""flax"""] def __init__( self : int , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ): requires_backends(self , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Tuple ): requires_backends(cls , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Dict , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : Tuple ): requires_backends(cls , ['flax'] ) class UpperCamelCase__ ( metaclass=lowercase_ ): """simple docstring""" _SCREAMING_SNAKE_CASE = ["""flax"""] def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Dict ): requires_backends(self , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Any , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : str ): requires_backends(cls , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Dict , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : Tuple ): requires_backends(cls , ['flax'] ) class UpperCamelCase__ ( metaclass=lowercase_ ): """simple docstring""" _SCREAMING_SNAKE_CASE = ["""flax"""] def __init__( self : Dict , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : List[Any] ): requires_backends(self , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : List[Any] ): requires_backends(cls , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Dict , *SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : int ): requires_backends(cls , ['flax'] ) class UpperCamelCase__ ( metaclass=lowercase_ ): """simple docstring""" _SCREAMING_SNAKE_CASE = ["""flax"""] def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : Dict ): requires_backends(self , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : Optional[Any] ): requires_backends(cls , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : int ): requires_backends(cls , ['flax'] ) class UpperCamelCase__ ( metaclass=lowercase_ ): """simple docstring""" _SCREAMING_SNAKE_CASE = ["""flax"""] def __init__( self : Tuple , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : Any ): requires_backends(self , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : List[str] ): requires_backends(cls , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Tuple , *SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : str ): requires_backends(cls , ['flax'] ) class UpperCamelCase__ ( 
metaclass=lowercase_ ): """simple docstring""" _SCREAMING_SNAKE_CASE = ["""flax"""] def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : int ): requires_backends(self , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Tuple , *SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : Optional[int] ): requires_backends(cls , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Tuple ): requires_backends(cls , ['flax'] )
224
1
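The recall docstrings above can be verified directly: in the first example there are three positives in the references and two of them are predicted correctly, so recall is 2/3. Requires scikit-learn.

from sklearn.metrics import recall_score

references = [0, 0, 1, 1, 1]
predictions = [0, 1, 0, 1, 1]
# TP = 2, FN = 1 -> recall = TP / (TP + FN) = 2/3
print(recall_score(references, predictions))  # 0.6666666666666666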
"""simple docstring""" import operator as op SCREAMING_SNAKE_CASE : Union[str, Any] = '''scaler.pt''' SCREAMING_SNAKE_CASE : Optional[Any] = '''pytorch_model''' SCREAMING_SNAKE_CASE : str = '''random_states''' SCREAMING_SNAKE_CASE : int = '''optimizer''' SCREAMING_SNAKE_CASE : Optional[int] = '''scheduler''' SCREAMING_SNAKE_CASE : int = '''pytorch_model.bin''' SCREAMING_SNAKE_CASE : List[Any] = '''pytorch_model.bin.index.json''' SCREAMING_SNAKE_CASE : str = '''model.safetensors''' SCREAMING_SNAKE_CASE : str = '''model.safetensors.index.json''' SCREAMING_SNAKE_CASE : Dict = '''1.10.2''' SCREAMING_SNAKE_CASE : Optional[Any] = '''py38''' SCREAMING_SNAKE_CASE : List[str] = '''4.17.0''' SCREAMING_SNAKE_CASE : Dict = ['''ml.p3.16xlarge''', '''ml.p3dn.24xlarge''', '''ml.p4dn.24xlarge'''] SCREAMING_SNAKE_CASE : Dict = ['''FULL_SHARD''', '''SHARD_GRAD_OP''', '''NO_SHARD''', '''HYBRID_SHARD''', '''HYBRID_SHARD_ZERO2'''] SCREAMING_SNAKE_CASE : List[Any] = ['''TRANSFORMER_BASED_WRAP''', '''SIZE_BASED_WRAP''', '''NO_WRAP'''] SCREAMING_SNAKE_CASE : List[Any] = ['''BACKWARD_PRE''', '''BACKWARD_POST''', '''NO_PREFETCH'''] SCREAMING_SNAKE_CASE : Any = ['''FULL_STATE_DICT''', '''LOCAL_STATE_DICT''', '''SHARDED_STATE_DICT'''] SCREAMING_SNAKE_CASE : Any = '''2.0.1''' SCREAMING_SNAKE_CASE : Tuple = ['''pdsh''', '''standard''', '''openmpi''', '''mvapich'''] SCREAMING_SNAKE_CASE : Tuple = ['''default''', '''reduce-overhead''', '''max-autotune'''] SCREAMING_SNAKE_CASE : int = {'''>''': op.gt, '''>=''': op.ge, '''==''': op.eq, '''!=''': op.ne, '''<=''': op.le, '''<''': op.lt} # These are the args for `torch.distributed.launch` for pytorch < 1.9 SCREAMING_SNAKE_CASE : Union[str, Any] = [ '''nnodes''', '''nproc_per_node''', '''rdzv_backend''', '''rdzv_endpoint''', '''rdzv_id''', '''rdzv_conf''', '''standalone''', '''max_restarts''', '''monitor_interval''', '''start_method''', '''role''', '''module''', '''m''', '''no_python''', '''run_path''', '''log_dir''', '''r''', '''redirects''', '''t''', '''tee''', '''node_rank''', '''master_addr''', '''master_port''', ] SCREAMING_SNAKE_CASE : List[str] = ['''DEEPSPEED''', '''MULTI_GPU''', '''FSDP''', '''MEGATRON_LM'''] SCREAMING_SNAKE_CASE : Dict = ['''DEEPSPEED''', '''MULTI_XPU''', '''FSDP''']
317
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE : List[Any] = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Union[str, Any] = [ '''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FocalNetForImageClassification''', '''FocalNetForMaskedImageModeling''', '''FocalNetBackbone''', '''FocalNetModel''', '''FocalNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
317
1
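A minimal, generic sketch of the lazy-import idea that _LazyModule in the row above relies on; this is not transformers' actual implementation, just the core mechanism: resolve the module on first attribute access.

import importlib


class LazyModule:
    def __init__(self, name: str):
        self._name = name
        self._module = None

    def __getattr__(self, attr: str):
        # __getattr__ is only called for attributes missing on the instance,
        # so the real import is deferred until the module is actually used.
        if self._module is None:
            self._module = importlib.import_module(self._name)
        return getattr(self._module, attr)


lazy_json = LazyModule("json")  # nothing imported yet
print(lazy_json.dumps({"lazy": True}))  # the import happens here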
import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging __UpperCamelCase : Optional[Any] = logging.get_logger(__name__) class lowercase__ ( UpperCamelCase_): def __init__( self : List[Any] , UpperCamelCase__ : Union[List[ControlNetModel], Tuple[ControlNetModel]] ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Tuple = nn.ModuleList(UpperCamelCase__ ) def __A ( self : Optional[Any] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Union[torch.Tensor, float, int] , UpperCamelCase__ : torch.Tensor , UpperCamelCase__ : List[torch.tensor] , UpperCamelCase__ : List[float] , UpperCamelCase__ : Optional[torch.Tensor] = None , UpperCamelCase__ : Optional[torch.Tensor] = None , UpperCamelCase__ : Optional[torch.Tensor] = None , UpperCamelCase__ : Optional[Dict[str, Any]] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = True , ): '''simple docstring''' for i, (image, scale, controlnet) in enumerate(zip(UpperCamelCase__ , UpperCamelCase__ , self.nets ) ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = controlnet( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) # merge samples if i == 0: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = down_samples, mid_sample else: SCREAMING_SNAKE_CASE : int = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(UpperCamelCase__ , UpperCamelCase__ ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def __A ( self : Any , UpperCamelCase__ : Union[str, os.PathLike] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Callable = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[str] = None , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = 0 SCREAMING_SNAKE_CASE : Tuple = save_directory for controlnet in self.nets: controlnet.save_pretrained( UpperCamelCase__ , is_main_process=UpperCamelCase__ , save_function=UpperCamelCase__ , safe_serialization=UpperCamelCase__ , variant=UpperCamelCase__ , ) idx += 1 SCREAMING_SNAKE_CASE : int = model_path_to_save + f"""_{idx}""" @classmethod def __A ( cls : Dict , UpperCamelCase__ : Optional[Union[str, os.PathLike]] , **UpperCamelCase__ : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = 0 SCREAMING_SNAKE_CASE : Optional[Any] = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... SCREAMING_SNAKE_CASE : Optional[Any] = pretrained_model_path while os.path.isdir(UpperCamelCase__ ): SCREAMING_SNAKE_CASE : Any = ControlNetModel.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ ) controlnets.append(UpperCamelCase__ ) idx += 1 SCREAMING_SNAKE_CASE : Union[str, Any] = pretrained_model_path + f"""_{idx}""" logger.info(f"""{len(UpperCamelCase__ )} controlnets loaded from {pretrained_model_path}.""" ) if len(UpperCamelCase__ ) == 0: raise ValueError( f"""No ControlNets found under {os.path.dirname(UpperCamelCase__ )}. 
Expected at least {pretrained_model_path + '_0'}.""" ) return cls(UpperCamelCase__ )
182
from typing import List, Optional, Union import numpy as np from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ....feature_extraction_sequence_utils import SequenceFeatureExtractor from ....feature_extraction_utils import BatchFeature from ....file_utils import PaddingStrategy, TensorType from ....utils import logging __UpperCamelCase : Optional[Any] = logging.get_logger(__name__) class lowercase__ ( UpperCamelCase_): UpperCamelCase_ = ["""input_features""", """attention_mask"""] def __init__( self : Any , UpperCamelCase__ : List[str]=80 , UpperCamelCase__ : Tuple=1_6000 , UpperCamelCase__ : Dict=0.0 , UpperCamelCase__ : Tuple=10 , UpperCamelCase__ : int=25 , UpperCamelCase__ : Optional[Any]="hamming_window" , UpperCamelCase__ : Tuple=3_2768.0 , UpperCamelCase__ : str=0.97 , UpperCamelCase__ : List[str]=1.0 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[str]=False , **UpperCamelCase__ : List[str] , ): '''simple docstring''' super().__init__(feature_size=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , padding_value=UpperCamelCase__ , **UpperCamelCase__ ) SCREAMING_SNAKE_CASE : Dict = feature_size SCREAMING_SNAKE_CASE : Union[str, Any] = sampling_rate SCREAMING_SNAKE_CASE : int = padding_value SCREAMING_SNAKE_CASE : Optional[Any] = hop_length SCREAMING_SNAKE_CASE : Tuple = win_length SCREAMING_SNAKE_CASE : Union[str, Any] = frame_signal_scale SCREAMING_SNAKE_CASE : int = preemphasis_coeff SCREAMING_SNAKE_CASE : List[Any] = mel_floor SCREAMING_SNAKE_CASE : int = normalize_means SCREAMING_SNAKE_CASE : List[str] = normalize_vars SCREAMING_SNAKE_CASE : Any = win_function SCREAMING_SNAKE_CASE : Union[str, Any] = return_attention_mask SCREAMING_SNAKE_CASE : int = win_length * sampling_rate // 1000 SCREAMING_SNAKE_CASE : Optional[int] = hop_length * sampling_rate // 1000 SCREAMING_SNAKE_CASE : int = optimal_fft_length(self.sample_size ) SCREAMING_SNAKE_CASE : Tuple = (self.n_fft // 2) + 1 def __A ( self : str , UpperCamelCase__ : np.array ): '''simple docstring''' if self.win_function == "hamming_window": SCREAMING_SNAKE_CASE : List[str] = window_function(window_length=self.sample_size , name=self.win_function , periodic=UpperCamelCase__ ) else: SCREAMING_SNAKE_CASE : Tuple = window_function(window_length=self.sample_size , name=self.win_function ) SCREAMING_SNAKE_CASE : Optional[Any] = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , ) SCREAMING_SNAKE_CASE : Tuple = spectrogram( one_waveform * self.frame_signal_scale , window=UpperCamelCase__ , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=UpperCamelCase__ , preemphasis=self.preemphasis_coeff , mel_filters=UpperCamelCase__ , mel_floor=self.mel_floor , log_mel='''log''' , ) return msfc_features.T def __A ( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] ): '''simple docstring''' if self.normalize_means: SCREAMING_SNAKE_CASE : str = x[:input_length].mean(axis=0 ) SCREAMING_SNAKE_CASE : List[str] = np.subtract(UpperCamelCase__ , UpperCamelCase__ ) if self.normalize_vars: SCREAMING_SNAKE_CASE : str = x[:input_length].std(axis=0 ) SCREAMING_SNAKE_CASE : Optional[int] = np.divide(UpperCamelCase__ , UpperCamelCase__ ) if input_length < x.shape[0]: SCREAMING_SNAKE_CASE : List[str] = padding_value # make sure 
array is in float32 SCREAMING_SNAKE_CASE : str = x.astype(np.floataa ) return x def __A ( self : Union[str, Any] , UpperCamelCase__ : List[np.ndarray] , UpperCamelCase__ : Optional[np.ndarray] = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [self._normalize_one(UpperCamelCase__ , UpperCamelCase__ , self.padding_value ) for x, n in zip(UpperCamelCase__ , UpperCamelCase__ )] def __call__( self : Dict , UpperCamelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase__ : Union[bool, str, PaddingStrategy] = False , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Optional[int] = None , **UpperCamelCase__ : Optional[int] , ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of""" f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with""" f""" {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( '''It is strongly recommended to pass the ``sampling_rate`` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) SCREAMING_SNAKE_CASE : Any = isinstance(UpperCamelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) SCREAMING_SNAKE_CASE : str = is_batched_numpy or ( isinstance(UpperCamelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: SCREAMING_SNAKE_CASE : int = [np.asarray(UpperCamelCase__ , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(UpperCamelCase__ , np.ndarray ): SCREAMING_SNAKE_CASE : Dict = np.asarray(UpperCamelCase__ , dtype=np.floataa ) elif isinstance(UpperCamelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): SCREAMING_SNAKE_CASE : Optional[Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: SCREAMING_SNAKE_CASE : List[str] = [raw_speech] # extract fbank features SCREAMING_SNAKE_CASE : Optional[Any] = [self._extract_mfsc_features(UpperCamelCase__ ) for one_waveform in raw_speech] # convert into correct format for padding SCREAMING_SNAKE_CASE : Tuple = BatchFeature({'''input_features''': features} ) SCREAMING_SNAKE_CASE : Optional[Any] = self.pad( UpperCamelCase__ , padding=UpperCamelCase__ , max_length=UpperCamelCase__ , truncation=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , **UpperCamelCase__ , ) # make sure list is in array format SCREAMING_SNAKE_CASE : Union[str, Any] = padded_inputs.get('''input_features''' ) if isinstance(input_features[0] , UpperCamelCase__ ): SCREAMING_SNAKE_CASE : Any = [np.asarray(UpperCamelCase__ , dtype=np.floataa ) for feature in input_features] SCREAMING_SNAKE_CASE : Union[str, Any] = padded_inputs.get('''attention_mask''' ) if attention_mask is not None: SCREAMING_SNAKE_CASE : Optional[int] = [np.asarray(UpperCamelCase__ , dtype=np.intaa ) for array in attention_mask] if self.normalize_means or self.normalize_vars: 
SCREAMING_SNAKE_CASE : Optional[Any] = ( np.array(UpperCamelCase__ , dtype=np.intaa ) if self._get_padding_strategies(UpperCamelCase__ , max_length=UpperCamelCase__ ) is not PaddingStrategy.DO_NOT_PAD and padding else None ) SCREAMING_SNAKE_CASE : List[Any] = self.normalize( padded_inputs['''input_features'''] , attention_mask=UpperCamelCase__ ) if return_tensors is not None: SCREAMING_SNAKE_CASE : Union[str, Any] = padded_inputs.convert_to_tensors(UpperCamelCase__ ) return padded_inputs
182
1
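A toy version of the per-utterance mean/variance normalisation in the feature-extractor row above; the shapes, names and the epsilon guard are illustrative, not the HF API.

import numpy as np


def normalize_one(x: np.ndarray, input_length: int, padding_value: float = 0.0) -> np.ndarray:
    x = x.copy()
    # normalise only the real frames; leave padded frames at padding_value
    x[:input_length] -= x[:input_length].mean(axis=0)
    x[:input_length] /= x[:input_length].std(axis=0) + 1e-7
    if input_length < x.shape[0]:
        x[input_length:] = padding_value
    return x.astype(np.float32)


feats = np.random.randn(6, 4).astype(np.float32)  # 6 frames, 4 mel bins
out = normalize_one(feats, input_length=4)
print(out[:4].mean(axis=0).round(5))  # ~0 per bin after normalisation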
from __future__ import annotations

from typing import Dict

from ...configuration_utils import PretrainedConfig


ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}


class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250_002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3_072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
8
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
8
1
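A generic sketch of the deprecation-shim pattern used for MobileViTFeatureExtractor above: the old name subclasses the new implementation and emits a FutureWarning on construction. The class names here are invented for illustration.

import warnings


class NewProcessor:
    def __init__(self, size: int = 224):
        self.size = size


class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    OldFeatureExtractor(size=256)
print(caught[0].category.__name__)  # FutureWarning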
'''simple docstring''' import copy from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING __lowerCamelCase = logging.get_logger(__name__) __lowerCamelCase = { '''microsoft/conditional-detr-resnet-50''': ( '''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json''' ), } class A__ ( _snake_case ): lowercase = "conditional_detr" lowercase = ["past_key_values"] lowercase = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=3 , UpperCamelCase__=300 , UpperCamelCase__=6 , UpperCamelCase__=2048 , UpperCamelCase__=8 , UpperCamelCase__=6 , UpperCamelCase__=2048 , UpperCamelCase__=8 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=True , UpperCamelCase__="relu" , UpperCamelCase__=256 , UpperCamelCase__=0.1 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=0.02 , UpperCamelCase__=1.0 , UpperCamelCase__=False , UpperCamelCase__="sine" , UpperCamelCase__="resnet50" , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=2 , UpperCamelCase__=5 , UpperCamelCase__=2 , UpperCamelCase__=1 , UpperCamelCase__=1 , UpperCamelCase__=2 , UpperCamelCase__=5 , UpperCamelCase__=2 , UpperCamelCase__=0.25 , **UpperCamelCase__ , ) -> Any: '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" ) if not use_timm_backbone: if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) A_ = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] ) elif isinstance(UpperCamelCase__ , UpperCamelCase__ ): A_ = backbone_config.get("""model_type""" ) A_ = CONFIG_MAPPING[backbone_model_type] A_ = config_class.from_dict(UpperCamelCase__ ) A_ = use_timm_backbone A_ = backbone_config A_ = num_channels A_ = num_queries A_ = d_model A_ = encoder_ffn_dim A_ = encoder_layers A_ = encoder_attention_heads A_ = decoder_ffn_dim A_ = decoder_layers A_ = decoder_attention_heads A_ = dropout A_ = attention_dropout A_ = activation_dropout A_ = activation_function A_ = init_std A_ = init_xavier_std A_ = encoder_layerdrop A_ = decoder_layerdrop A_ = encoder_layers A_ = auxiliary_loss A_ = position_embedding_type A_ = backbone A_ = use_pretrained_backbone A_ = dilation # Hungarian matcher A_ = class_cost A_ = bbox_cost A_ = giou_cost # Loss coefficients A_ = mask_loss_coefficient A_ = dice_loss_coefficient A_ = cls_loss_coefficient A_ = bbox_loss_coefficient A_ = giou_loss_coefficient A_ = focal_alpha super().__init__(is_encoder_decoder=UpperCamelCase__ , **UpperCamelCase__ ) @property def snake_case_ ( self ) -> int: '''simple docstring''' return self.encoder_attention_heads @property def snake_case_ ( self ) -> int: '''simple docstring''' return self.d_model def snake_case_ ( self ) -> Dict: '''simple docstring''' A_ = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: A_ = self.backbone_config.to_dict() A_ = self.__class__.model_type return output class A__ ( _snake_case ): lowercase = version.parse("1.11" ) @property def snake_case_ ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: 
"""num_channels""", 2: """height""", 3: """width"""}), ("""pixel_mask""", {0: """batch"""}), ] ) @property def snake_case_ ( self ) -> float: '''simple docstring''' return 1e-5 @property def snake_case_ ( self ) -> int: '''simple docstring''' return 12
162
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
162
1
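A hand check of the Euler-47 helpers above: 644, 645 and 646 are the first three consecutive integers with three distinct prime factors each (the n=3 case of run).

def unique_prime_factors(n: int) -> set:
    i, factors = 2, set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


for n in (644, 645, 646):
    print(n, sorted(unique_prime_factors(n)))
# 644 [2, 7, 23]
# 645 [3, 5, 43]
# 646 [2, 17, 19]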
"""simple docstring""" import itertools import random import unittest import numpy as np from transformers import BatchFeature, SpeechTaFeatureExtractor from transformers.testing_utils import require_torch from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch _a : Optional[Any] = random.Random() def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : Tuple=1.0 ,_lowerCamelCase : List[str]=None ,_lowerCamelCase : Union[str, Any]=None ) -> List[str]: if rng is None: _lowerCAmelCase : Dict = global_rng _lowerCAmelCase : str = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch class __A ( unittest.TestCase ): def __init__( self , a__ , a__=7 , a__=400 , a__=2000 , a__=1 , a__=0.0 , a__=16000 , a__=True , a__=80 , a__=16 , a__=64 , a__="hann_window" , a__=80 , a__=7600 , a__=1e-10 , a__=True , ): _lowerCAmelCase : str = parent _lowerCAmelCase : Any = batch_size _lowerCAmelCase : Optional[int] = min_seq_length _lowerCAmelCase : Union[str, Any] = max_seq_length _lowerCAmelCase : int = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) _lowerCAmelCase : str = feature_size _lowerCAmelCase : Tuple = padding_value _lowerCAmelCase : Optional[Any] = sampling_rate _lowerCAmelCase : int = do_normalize _lowerCAmelCase : Optional[Any] = num_mel_bins _lowerCAmelCase : Any = hop_length _lowerCAmelCase : List[str] = win_length _lowerCAmelCase : int = win_function _lowerCAmelCase : Union[str, Any] = fmin _lowerCAmelCase : int = fmax _lowerCAmelCase : Optional[int] = mel_floor _lowerCAmelCase : Any = return_attention_mask def __A ( self ): return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "do_normalize": self.do_normalize, "num_mel_bins": self.num_mel_bins, "hop_length": self.hop_length, "win_length": self.win_length, "win_function": self.win_function, "fmin": self.fmin, "fmax": self.fmax, "mel_floor": self.mel_floor, "return_attention_mask": self.return_attention_mask, } def __A ( self , a__=False , a__=False ): def _flatten(a__ ): return list(itertools.chain(*a__ ) ) if equal_length: _lowerCAmelCase : Tuple = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size _lowerCAmelCase : Union[str, Any] = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: _lowerCAmelCase : str = [np.asarray(a__ ) for x in speech_inputs] return speech_inputs def __A ( self , a__=False , a__=False ): if equal_length: _lowerCAmelCase : str = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size _lowerCAmelCase : Optional[int] = [ floats_list((x, self.num_mel_bins) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: _lowerCAmelCase : Tuple = [np.asarray(a__ ) for x in speech_inputs] return speech_inputs @require_torch class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): _UpperCamelCase : Optional[Any] = SpeechTaFeatureExtractor def __A ( self ): _lowerCAmelCase : Any = SpeechTaFeatureExtractionTester(self ) def __A ( self , a__ ): self.assertTrue(np.all(np.mean(a__ , axis=0 ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(a__ , axis=0 ) 
- 1 ) < 1e-3 ) ) def __A ( self ): # Tests that all call wrap to encode_plus and batch_encode_plus _lowerCAmelCase : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 _lowerCAmelCase : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _lowerCAmelCase : Optional[Any] = [np.asarray(a__ ) for speech_input in speech_inputs] # Test not batched input _lowerCAmelCase : str = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values _lowerCAmelCase : int = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values self.assertTrue(np.allclose(a__ , a__ , atol=1e-3 ) ) # Test batched _lowerCAmelCase : Union[str, Any] = feat_extract(a__ , return_tensors="""np""" ).input_values _lowerCAmelCase : Optional[Any] = feat_extract(a__ , return_tensors="""np""" ).input_values for enc_seq_a, enc_seq_a in zip(a__ , a__ ): self.assertTrue(np.allclose(a__ , a__ , atol=1e-3 ) ) def __A ( self ): _lowerCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _lowerCAmelCase : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _lowerCAmelCase : Optional[Any] = ["""longest""", """max_length""", """do_not_pad"""] _lowerCAmelCase : Any = [None, 1600, None] for max_length, padding in zip(a__ , a__ ): _lowerCAmelCase : List[str] = feat_extract(a__ , padding=a__ , max_length=a__ , return_tensors="""np""" ) _lowerCAmelCase : Any = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) self.assertTrue(input_values[0][800:].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_values[1][:1000] ) self.assertTrue(input_values[0][1000:].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def __A ( self ): _lowerCAmelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _lowerCAmelCase : Optional[int] = range(800 , 1400 , 200 ) _lowerCAmelCase : Any = [floats_list((1, x) )[0] for x in lengths] _lowerCAmelCase : Dict = ["""longest""", """max_length""", """do_not_pad"""] _lowerCAmelCase : List[str] = [None, 1600, None] for max_length, padding in zip(a__ , a__ ): _lowerCAmelCase : List[Any] = feat_extract(a__ , max_length=a__ , padding=a__ ) _lowerCAmelCase : List[Any] = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) self._check_zero_mean_unit_variance(input_values[1][:1000] ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def __A ( self ): _lowerCAmelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _lowerCAmelCase : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _lowerCAmelCase : Union[str, Any] = feat_extract( a__ , truncation=a__ , max_length=1000 , padding="""max_length""" , return_tensors="""np""" ) _lowerCAmelCase : Any = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1] ) self._check_zero_mean_unit_variance(input_values[2] ) def __A ( self ): _lowerCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _lowerCAmelCase : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _lowerCAmelCase : Tuple = feat_extract( a__ , truncation=a__ , max_length=1000 , padding="""longest""" , return_tensors="""np""" ) _lowerCAmelCase : Optional[Any] = 
processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertTrue(input_values.shape == (3, 1000) ) _lowerCAmelCase : List[str] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _lowerCAmelCase : Optional[int] = feat_extract( a__ , truncation=a__ , max_length=2000 , padding="""longest""" , return_tensors="""np""" ) _lowerCAmelCase : List[Any] = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length > longest -> then pad to longest self.assertTrue(input_values.shape == (3, 1200) ) def __A ( self ): _lowerCAmelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _lowerCAmelCase : int = np.random.rand(100 ).astype(np.floataa ) _lowerCAmelCase : List[Any] = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: _lowerCAmelCase : int = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) _lowerCAmelCase : Optional[Any] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def __A ( self ): # Tests that all call wrap to encode_plus and batch_encode_plus _lowerCAmelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 _lowerCAmelCase : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _lowerCAmelCase : Optional[Any] = [np.asarray(a__ ) for speech_input in speech_inputs] # Test feature size _lowerCAmelCase : Optional[Any] = feature_extractor(audio_target=a__ , padding=a__ , return_tensors="""np""" ).input_values self.assertTrue(input_values.ndim == 3 ) self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins ) # Test not batched input _lowerCAmelCase : Tuple = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_values _lowerCAmelCase : int = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_values self.assertTrue(np.allclose(a__ , a__ , atol=1e-3 ) ) # Test batched _lowerCAmelCase : Union[str, Any] = feature_extractor(a__ , return_tensors="""np""" ).input_values _lowerCAmelCase : Any = feature_extractor(a__ , return_tensors="""np""" ).input_values for enc_seq_a, enc_seq_a in zip(a__ , a__ ): self.assertTrue(np.allclose(a__ , a__ , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. 
_lowerCAmelCase : Any = [floats_list((1, x) )[0] for x in (800, 800, 800)] _lowerCAmelCase : List[str] = np.asarray(a__ ) _lowerCAmelCase : Optional[Any] = feature_extractor(a__ , return_tensors="""np""" ).input_values _lowerCAmelCase : Tuple = feature_extractor(a__ , return_tensors="""np""" ).input_values for enc_seq_a, enc_seq_a in zip(a__ , a__ ): self.assertTrue(np.allclose(a__ , a__ , atol=1e-3 ) ) def __A ( self ): _lowerCAmelCase : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target() _lowerCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_dict ) _lowerCAmelCase : List[Any] = feat_extract.model_input_names[0] _lowerCAmelCase : Tuple = BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(a__ ) == len(a__ ) for x, y in zip(a__ , processed_features[input_name] ) ) ) _lowerCAmelCase : str = self.feat_extract_tester.prepare_inputs_for_target(equal_length=a__ ) _lowerCAmelCase : Tuple = BatchFeature({input_name: speech_inputs} , tensor_type="""np""" ) _lowerCAmelCase : Union[str, Any] = processed_features[input_name] if len(batch_features_input.shape ) < 3: _lowerCAmelCase : Optional[Any] = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) ) @require_torch def __A ( self ): _lowerCAmelCase : Tuple = self.feat_extract_tester.prepare_inputs_for_target(equal_length=a__ ) _lowerCAmelCase : Any = self.feature_extraction_class(**self.feat_extract_dict ) _lowerCAmelCase : Union[str, Any] = feat_extract.model_input_names[0] _lowerCAmelCase : List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type="""pt""" ) _lowerCAmelCase : Optional[Any] = processed_features[input_name] if len(batch_features_input.shape ) < 3: _lowerCAmelCase : Dict = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) ) @require_torch def __A ( self ): _lowerCAmelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict ) _lowerCAmelCase : int = self.feat_extract_tester.prepare_inputs_for_target() _lowerCAmelCase : int = feat_extract.model_input_names[0] _lowerCAmelCase : Union[str, Any] = BatchFeature({input_name: speech_inputs} ) _lowerCAmelCase : Tuple = feat_extract.num_mel_bins # hack! _lowerCAmelCase : Dict = feat_extract.pad(a__ , padding="""longest""" , return_tensors="""np""" )[input_name] _lowerCAmelCase : List[str] = feat_extract.pad(a__ , padding="""longest""" , return_tensors="""pt""" )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 ) def __A ( self ): _lowerCAmelCase : str = self.feat_extract_dict _lowerCAmelCase : Tuple = True _lowerCAmelCase : Dict = self.feature_extraction_class(**a__ ) _lowerCAmelCase : Tuple = self.feat_extract_tester.prepare_inputs_for_target() _lowerCAmelCase : Any = [len(a__ ) for x in speech_inputs] _lowerCAmelCase : Optional[int] = feat_extract.model_input_names[0] _lowerCAmelCase : str = BatchFeature({input_name: speech_inputs} ) _lowerCAmelCase : int = feat_extract.num_mel_bins # hack! 
_lowerCAmelCase : Any = feat_extract.pad(a__ , padding="""longest""" , return_tensors="""np""" ) self.assertIn("""attention_mask""" , a__ ) self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) ) self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , a__ ) def __A ( self ): _lowerCAmelCase : List[str] = self.feat_extract_dict _lowerCAmelCase : Dict = True _lowerCAmelCase : Union[str, Any] = self.feature_extraction_class(**a__ ) _lowerCAmelCase : Tuple = self.feat_extract_tester.prepare_inputs_for_target() _lowerCAmelCase : str = [len(a__ ) for x in speech_inputs] _lowerCAmelCase : Tuple = feat_extract.model_input_names[0] _lowerCAmelCase : Dict = BatchFeature({input_name: speech_inputs} ) _lowerCAmelCase : int = min(a__ ) _lowerCAmelCase : Dict = feat_extract.num_mel_bins # hack! _lowerCAmelCase : Optional[int] = feat_extract.pad( a__ , padding="""max_length""" , max_length=a__ , truncation=a__ , return_tensors="""np""" ) self.assertIn("""attention_mask""" , a__ ) self.assertListEqual( list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] ) def __A ( self , a__ ): from datasets import load_dataset _lowerCAmelCase : List[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" ) # automatic decoding with librispeech _lowerCAmelCase : Dict = ds.sort("""id""" ).select(range(a__ ) )[:num_samples]["""audio"""] return [x["array"] for x in speech_samples] def __A ( self ): # fmt: off _lowerCAmelCase : Dict = torch.tensor( [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03, 3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03, 2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04, 4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03, 7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04, 4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03] ) # fmt: on _lowerCAmelCase : Any = self._load_datasamples(1 ) _lowerCAmelCase : Optional[int] = SpeechTaFeatureExtractor() _lowerCAmelCase : Union[str, Any] = feature_extractor(a__ , return_tensors="""pt""" ).input_values self.assertEquals(input_values.shape , (1, 93680) ) self.assertTrue(torch.allclose(input_values[0, :30] , a__ , atol=1e-6 ) ) def __A ( self ): # fmt: off _lowerCAmelCase : Optional[Any] = torch.tensor( [-2.6_8_7_0, -3.0_1_0_4, -3.1_3_5_6, -3.5_3_5_2, -3.0_0_4_4, -3.0_3_5_3, -3.4_7_1_9, -3.6_7_7_7, -3.1_5_2_0, -2.9_4_3_5, -2.6_5_5_3, -2.8_7_9_5, -2.9_9_4_4, -2.5_9_2_1, -3.0_2_7_9, -3.0_3_8_6, -3.0_8_6_4, -3.1_2_9_1, -3.2_3_5_3, -2.7_4_4_4, -2.6_8_3_1, -2.7_2_8_7, -3.1_7_6_1, -3.1_5_7_1, -3.2_7_2_6, -3.0_5_8_2, -3.1_0_0_7, -3.4_5_3_3, -3.4_6_9_5, -3.0_9_9_8] ) # fmt: on _lowerCAmelCase : str = self._load_datasamples(1 ) _lowerCAmelCase : Optional[int] = SpeechTaFeatureExtractor() _lowerCAmelCase : Union[str, Any] = feature_extractor(audio_target=a__ , return_tensors="""pt""" ).input_values self.assertEquals(input_values.shape , (1, 366, 80) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , a__ , atol=1e-4 ) )
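# A stand-alone sketch of what the tests above exercise (added for
# orientation; the 16 kHz rate and the random one-second waveform are
# illustrative, and `SpeechTaFeatureExtractor` is the class imported at the
# top of this file):
#
#   feature_extractor = SpeechTaFeatureExtractor()
#   waveform = np.random.rand(16000).astype(np.float32)  # ~1 s of audio
#   inputs = feature_extractor(waveform, return_tensors="np")
#   print(inputs.input_values.shape)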
126
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple import torch from torch import nn from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel from transformers.utils import ModelOutput @dataclass class __A ( SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : Optional[torch.FloatTensor] = None _UpperCamelCase : torch.FloatTensor = None _UpperCamelCase : Optional[Tuple[torch.FloatTensor]] = None _UpperCamelCase : Optional[Tuple[torch.FloatTensor]] = None class __A ( SCREAMING_SNAKE_CASE_ ): def __init__( self , a__=1 , a__=0 , a__=2 , a__=512 , a__="cls" , a__=False , a__=True , **a__ , ): super().__init__(pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , **a__ ) _lowerCAmelCase : Optional[Any] = project_dim _lowerCAmelCase : List[str] = pooler_fn _lowerCAmelCase : Any = learn_encoder _lowerCAmelCase : Optional[int] = use_attention_mask class __A ( SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : Optional[int] = [R"pooler", R"logit_scale"] _UpperCamelCase : List[Any] = [R"position_ids", R"predictions.decoder.bias"] _UpperCamelCase : List[Any] = "roberta" _UpperCamelCase : Optional[int] = RobertaSeriesConfig def __init__( self , a__ ): super().__init__(a__ ) _lowerCAmelCase : str = XLMRobertaModel(a__ ) _lowerCAmelCase : Optional[Any] = nn.Linear(config.hidden_size , config.project_dim ) _lowerCAmelCase : List[Any] = getattr(a__ , """has_pre_transformation""" , a__ ) if self.has_pre_transformation: _lowerCAmelCase : List[str] = nn.Linear(config.hidden_size , config.project_dim ) _lowerCAmelCase : Optional[int] = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps ) self.post_init() def __A ( self , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , ): _lowerCAmelCase : List[str] = return_dict if return_dict is not None else self.config.use_return_dict _lowerCAmelCase : Optional[int] = self.base_model( input_ids=a__ , attention_mask=a__ , token_type_ids=a__ , position_ids=a__ , head_mask=a__ , inputs_embeds=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , output_attentions=a__ , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=a__ , ) if self.has_pre_transformation: _lowerCAmelCase : Optional[Any] = outputs["""hidden_states"""][-2] _lowerCAmelCase : Optional[Any] = self.pre_LN(a__ ) _lowerCAmelCase : int = self.transformation_pre(a__ ) return TransformationModelOutput( projection_state=a__ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , ) else: _lowerCAmelCase : Union[str, Any] = self.transformation(outputs.last_hidden_state ) return TransformationModelOutput( projection_state=a__ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
126
1
"""simple docstring""" import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from typing import Any, ContextManager, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class snake_case ( __UpperCAmelCase ): """simple docstring""" def __get__( self : str ,lowerCamelCase__ : Any ,lowerCamelCase__ : List[str]=None ): # See docs.python.org/3/howto/descriptor.html#properties if obj is None: return self if self.fget is None: raise AttributeError('unreadable attribute' ) UpperCAmelCase__ = '__cached_' + self.fget.__name__ UpperCAmelCase__ = getattr(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ) if cached is None: UpperCAmelCase__ = self.fget(lowerCamelCase__ ) setattr(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ) return cached def a_ ( lowerCamelCase ): UpperCAmelCase__ = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(f'''invalid truth value {val!r}''' ) def a_ ( lowerCamelCase ): if is_torch_fx_proxy(lowerCamelCase ): return True if is_torch_available(): import torch if isinstance(lowerCamelCase , torch.Tensor ): return True if is_tf_available(): import tensorflow as tf if isinstance(lowerCamelCase , tf.Tensor ): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(lowerCamelCase , (jnp.ndarray, Tracer) ): return True return isinstance(lowerCamelCase , np.ndarray ) def a_ ( lowerCamelCase ): return isinstance(lowerCamelCase , np.ndarray ) def a_ ( lowerCamelCase ): return _is_numpy(lowerCamelCase ) def a_ ( lowerCamelCase ): import torch return isinstance(lowerCamelCase , torch.Tensor ) def a_ ( lowerCamelCase ): return False if not is_torch_available() else _is_torch(lowerCamelCase ) def a_ ( lowerCamelCase ): import torch return isinstance(lowerCamelCase , torch.device ) def a_ ( lowerCamelCase ): return False if not is_torch_available() else _is_torch_device(lowerCamelCase ) def a_ ( lowerCamelCase ): import torch if isinstance(lowerCamelCase , lowerCamelCase ): if hasattr(lowerCamelCase , lowerCamelCase ): UpperCAmelCase__ = getattr(lowerCamelCase , lowerCamelCase ) else: return False return isinstance(lowerCamelCase , torch.dtype ) def a_ ( lowerCamelCase ): return False if not is_torch_available() else _is_torch_dtype(lowerCamelCase ) def a_ ( lowerCamelCase ): import tensorflow as tf return isinstance(lowerCamelCase , tf.Tensor ) def a_ ( lowerCamelCase ): return False if not is_tf_available() else _is_tensorflow(lowerCamelCase ) def a_ ( lowerCamelCase ): import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(lowerCamelCase , 'is_symbolic_tensor' ): return tf.is_symbolic_tensor(lowerCamelCase ) return type(lowerCamelCase ) == tf.Tensor def a_ ( lowerCamelCase ): return False if not is_tf_available() else _is_tf_symbolic_tensor(lowerCamelCase ) def a_ ( lowerCamelCase ): import jax.numpy as jnp # noqa: F811 return isinstance(lowerCamelCase , jnp.ndarray ) def a_ ( lowerCamelCase ): return False if not is_flax_available() else _is_jax(lowerCamelCase ) def a_ ( lowerCamelCase ): if isinstance(lowerCamelCase , (dict, UserDict) ): return {k: to_py_obj(lowerCamelCase ) for k, v in 
obj.items()} elif isinstance(lowerCamelCase , (list, tuple) ): return [to_py_obj(lowerCamelCase ) for o in obj] elif is_tf_tensor(lowerCamelCase ): return obj.numpy().tolist() elif is_torch_tensor(lowerCamelCase ): return obj.detach().cpu().tolist() elif is_jax_tensor(lowerCamelCase ): return np.asarray(lowerCamelCase ).tolist() elif isinstance(lowerCamelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays return obj.tolist() else: return obj def a_ ( lowerCamelCase ): if isinstance(lowerCamelCase , (dict, UserDict) ): return {k: to_numpy(lowerCamelCase ) for k, v in obj.items()} elif isinstance(lowerCamelCase , (list, tuple) ): return np.array(lowerCamelCase ) elif is_tf_tensor(lowerCamelCase ): return obj.numpy() elif is_torch_tensor(lowerCamelCase ): return obj.detach().cpu().numpy() elif is_jax_tensor(lowerCamelCase ): return np.asarray(lowerCamelCase ) else: return obj class snake_case ( __UpperCAmelCase ): """simple docstring""" def __lowerCAmelCase ( self : List[Any] ): UpperCAmelCase__ = fields(self ) # Safety and consistency checks if not len(lowerCamelCase__ ): raise ValueError(f'''{self.__class__.__name__} has no fields.''' ) if not all(field.default is None for field in class_fields[1:] ): raise ValueError(f'''{self.__class__.__name__} should not have more than one required field.''' ) UpperCAmelCase__ = getattr(self ,class_fields[0].name ) UpperCAmelCase__ = all(getattr(self ,field.name ) is None for field in class_fields[1:] ) if other_fields_are_none and not is_tensor(lowerCamelCase__ ): if isinstance(lowerCamelCase__ ,lowerCamelCase__ ): UpperCAmelCase__ = first_field.items() UpperCAmelCase__ = True else: try: UpperCAmelCase__ = iter(lowerCamelCase__ ) UpperCAmelCase__ = True except TypeError: UpperCAmelCase__ = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for idx, element in enumerate(lowerCamelCase__ ): if ( not isinstance(lowerCamelCase__ ,(list, tuple) ) or not len(lowerCamelCase__ ) == 2 or not isinstance(element[0] ,lowerCamelCase__ ) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute UpperCAmelCase__ = first_field else: # If we have a mixed iterator, raise an error raise ValueError( f'''Cannot set key/value for {element}. 
It needs to be a tuple (key, value).''' ) break setattr(self ,element[0] ,element[1] ) if element[1] is not None: UpperCAmelCase__ = element[1] elif first_field is not None: UpperCAmelCase__ = first_field else: for field in class_fields: UpperCAmelCase__ = getattr(self ,field.name ) if v is not None: UpperCAmelCase__ = v def __delitem__( self : Union[str, Any] ,*lowerCamelCase__ : List[str] ,**lowerCamelCase__ : Dict ): raise Exception(f'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' ) def __lowerCAmelCase ( self : Optional[int] ,*lowerCamelCase__ : List[str] ,**lowerCamelCase__ : str ): raise Exception(f'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' ) def __lowerCAmelCase ( self : str ,*lowerCamelCase__ : Any ,**lowerCamelCase__ : Union[str, Any] ): raise Exception(f'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' ) def __lowerCAmelCase ( self : str ,*lowerCamelCase__ : Optional[Any] ,**lowerCamelCase__ : List[Any] ): raise Exception(f'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' ) def __getitem__( self : List[str] ,lowerCamelCase__ : List[Any] ): if isinstance(lowerCamelCase__ ,lowerCamelCase__ ): UpperCAmelCase__ = dict(self.items() ) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__( self : Optional[int] ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[int] ): if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(lowerCamelCase__ ,lowerCamelCase__ ) super().__setattr__(lowerCamelCase__ ,lowerCamelCase__ ) def __setitem__( self : Optional[Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : Dict ): # Will raise a KeyException if needed super().__setitem__(lowerCamelCase__ ,lowerCamelCase__ ) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(lowerCamelCase__ ,lowerCamelCase__ ) def __lowerCAmelCase ( self : Optional[int] ): return tuple(self[k] for k in self.keys() ) class snake_case ( __UpperCAmelCase , __UpperCAmelCase ): """simple docstring""" @classmethod def __lowerCAmelCase ( cls : List[str] ,lowerCamelCase__ : List[Any] ): raise ValueError( f'''{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}''' ) class snake_case ( __UpperCAmelCase ): """simple docstring""" snake_case__ = "longest" snake_case__ = "max_length" snake_case__ = "do_not_pad" class snake_case ( __UpperCAmelCase ): """simple docstring""" snake_case__ = "pt" snake_case__ = "tf" snake_case__ = "np" snake_case__ = "jax" class snake_case : """simple docstring""" def __init__( self : Tuple ,lowerCamelCase__ : List[ContextManager] ): UpperCAmelCase__ = context_managers UpperCAmelCase__ = ExitStack() def __enter__( self : Union[str, Any] ): for context_manager in self.context_managers: self.stack.enter_context(lowerCamelCase__ ) def __exit__( self : Union[str, Any] ,*lowerCamelCase__ : Optional[Any] ,**lowerCamelCase__ : Dict ): self.stack.__exit__(*lowerCamelCase__ ,**lowerCamelCase__ ) def a_ ( lowerCamelCase ): UpperCAmelCase__ = infer_framework(lowerCamelCase ) if framework == "tf": UpperCAmelCase__ = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": UpperCAmelCase__ = inspect.signature(model_class.forward ) # PyTorch models else: UpperCAmelCase__ = inspect.signature(model_class.__call__ ) # Flax models for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False 
def a_ ( lowerCamelCase ): UpperCAmelCase__ = model_class.__name__ UpperCAmelCase__ = infer_framework(lowerCamelCase ) if framework == "tf": UpperCAmelCase__ = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": UpperCAmelCase__ = inspect.signature(model_class.forward ) # PyTorch models else: UpperCAmelCase__ = inspect.signature(model_class.__call__ ) # Flax models if "QuestionAnswering" in model_name: return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")] else: return [p for p in signature.parameters if "label" in p] def a_ ( lowerCamelCase , lowerCamelCase = "" , lowerCamelCase = "." ): def _flatten_dict(lowerCamelCase , lowerCamelCase="" , lowerCamelCase="." ): for k, v in d.items(): UpperCAmelCase__ = str(lowerCamelCase ) + delimiter + str(lowerCamelCase ) if parent_key else k if v and isinstance(lowerCamelCase , lowerCamelCase ): yield from flatten_dict(lowerCamelCase , lowerCamelCase , delimiter=lowerCamelCase ).items() else: yield key, v return dict(_flatten_dict(lowerCamelCase , lowerCamelCase , lowerCamelCase ) ) @contextmanager def a_ ( lowerCamelCase , lowerCamelCase = False ): if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def a_ ( lowerCamelCase , lowerCamelCase=None ): if is_numpy_array(lowerCamelCase ): return np.transpose(lowerCamelCase , axes=lowerCamelCase ) elif is_torch_tensor(lowerCamelCase ): return array.T if axes is None else array.permute(*lowerCamelCase ) elif is_tf_tensor(lowerCamelCase ): import tensorflow as tf return tf.transpose(lowerCamelCase , perm=lowerCamelCase ) elif is_jax_tensor(lowerCamelCase ): return jnp.transpose(lowerCamelCase , axes=lowerCamelCase ) else: raise ValueError(f'''Type not supported for transpose: {type(lowerCamelCase )}.''' ) def a_ ( lowerCamelCase , lowerCamelCase ): if is_numpy_array(lowerCamelCase ): return np.reshape(lowerCamelCase , lowerCamelCase ) elif is_torch_tensor(lowerCamelCase ): return array.reshape(*lowerCamelCase ) elif is_tf_tensor(lowerCamelCase ): import tensorflow as tf return tf.reshape(lowerCamelCase , lowerCamelCase ) elif is_jax_tensor(lowerCamelCase ): return jnp.reshape(lowerCamelCase , lowerCamelCase ) else: raise ValueError(f'''Type not supported for reshape: {type(lowerCamelCase )}.''' ) def a_ ( lowerCamelCase , lowerCamelCase=None ): if is_numpy_array(lowerCamelCase ): return np.squeeze(lowerCamelCase , axis=lowerCamelCase ) elif is_torch_tensor(lowerCamelCase ): return array.squeeze() if axis is None else array.squeeze(dim=lowerCamelCase ) elif is_tf_tensor(lowerCamelCase ): import tensorflow as tf return tf.squeeze(lowerCamelCase , axis=lowerCamelCase ) elif is_jax_tensor(lowerCamelCase ): return jnp.squeeze(lowerCamelCase , axis=lowerCamelCase ) else: raise ValueError(f'''Type not supported for squeeze: {type(lowerCamelCase )}.''' ) def a_ ( lowerCamelCase , lowerCamelCase ): if is_numpy_array(lowerCamelCase ): return np.expand_dims(lowerCamelCase , lowerCamelCase ) elif is_torch_tensor(lowerCamelCase ): return array.unsqueeze(dim=lowerCamelCase ) elif is_tf_tensor(lowerCamelCase ): import tensorflow as tf return tf.expand_dims(lowerCamelCase , axis=lowerCamelCase ) elif is_jax_tensor(lowerCamelCase ): return jnp.expand_dims(lowerCamelCase , axis=lowerCamelCase ) else: raise ValueError(f'''Type not supported for expand_dims: {type(lowerCamelCase )}.''' ) def a_ ( lowerCamelCase ): if is_numpy_array(lowerCamelCase ): return np.size(lowerCamelCase ) elif 
is_torch_tensor(lowerCamelCase ): return array.numel() elif is_tf_tensor(lowerCamelCase ): import tensorflow as tf return tf.size(lowerCamelCase ) elif is_jax_tensor(lowerCamelCase ): return array.size else: raise ValueError(f'''Type not supported for expand_dims: {type(lowerCamelCase )}.''' ) def a_ ( lowerCamelCase , lowerCamelCase ): for key, value in auto_map.items(): if isinstance(lowerCamelCase , (tuple, list) ): UpperCAmelCase__ = [f'''{repo_id}--{v}''' if (v is not None and '--' not in v) else v for v in value] elif value is not None and "--" not in value: UpperCAmelCase__ = f'''{repo_id}--{value}''' return auto_map def a_ ( lowerCamelCase ): for base_class in inspect.getmro(lowerCamelCase ): UpperCAmelCase__ = base_class.__module__ UpperCAmelCase__ = base_class.__name__ if module.startswith('tensorflow' ) or module.startswith('keras' ) or name == "TFPreTrainedModel": return "tf" elif module.startswith('torch' ) or name == "PreTrainedModel": return "pt" elif module.startswith('flax' ) or module.startswith('jax' ) or name == "FlaxPreTrainedModel": return "flax" else: raise TypeError(f'''Could not infer framework from class {model_class}.''' )
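# Behavior sketch for the dict-flattening helper defined above (added; the
# recursive call inside the helper shows its original name was
# `flatten_dict`, even though the outer definition is mangled in this dump):
#
#   flatten_dict({"model": {"encoder": {"layers": 12}}, "lr": 1e-4})
#   # -> {"model.encoder.layers": 12, "lr": 0.0001}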
98
def is_palindrome(head):
    if not head:
        return True
    # split the list into two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True


def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
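# The three checks above assume a singly linked list whose nodes expose
# `val` and `next`, but no node class is defined in this file. The sketch
# below adds an assumed ListNode plus a tiny self-test; the class and helper
# names are illustrative, not part of the original source.
class ListNode:
    def __init__(self, val=0, next_node=None):
        self.val = val
        self.next = next_node


def build_list(values):
    # Build a singly linked list from a Python list and return its head.
    head = None
    for v in reversed(values):
        head = ListNode(v, head)
    return head


if __name__ == "__main__":
    assert is_palindrome_stack(build_list([1, 2, 2, 1]))
    assert not is_palindrome_dict(build_list([1, 2, 3]))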
118
0
def solution(length: int = 50) -> int:
    """Count the ways to fill a row of ``length`` units with unit squares
    and tiles of lengths two, three, and four."""
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]


if __name__ == "__main__":
    print(f'''{solution() = }''')
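# Added sanity check: the triple loop above is equivalent to the
# tetranacci-style recurrence f(n) = f(n-1) + f(n-2) + f(n-3) + f(n-4) with
# f(0) = 1 and out-of-range terms treated as zero, so the two can be
# cross-checked on small inputs. This helper is illustrative, not part of
# the original solution.
def _tetranacci(length: int) -> int:
    f = [1]
    for n in range(1, length + 1):
        f.append(sum(f[max(0, n - 4) : n]))
    return f[length]


assert all(solution(n) == _tetranacci(n) for n in range(20))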
360
import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """
    Args:
        model: BertModel Pytorch model instance to be converted
        ckpt_dir: Tensorflow model directory
        model_name: model name
    """
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
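# Example invocation (added sketch; the script file name and the paths are
# illustrative, while the flags come from the argument parser defined above):
#
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_checkpoint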
300
0
"""simple docstring""" def _A ( UpperCamelCase_ : list[int]) -> int: '''simple docstring''' if not numbers: return 0 if not isinstance(UpperCamelCase_, (list, tuple)) or not all( isinstance(UpperCamelCase_, UpperCamelCase_) for number in numbers): raise ValueError("numbers must be an iterable of integers") __lowercase = __lowercase = __lowercase = numbers[0] for i in range(1, len(UpperCamelCase_)): # update the maximum and minimum subarray products __lowercase = numbers[i] if number < 0: __lowercase ,__lowercase = min_till_now, max_till_now __lowercase = max(UpperCamelCase_, max_till_now * number) __lowercase = min(UpperCamelCase_, min_till_now * number) # update the maximum product found till now __lowercase = max(UpperCamelCase_, UpperCamelCase_) return max_prod
17
import argparse import logging import os import sys import numpy as np import onnxruntime import torch from bart_onnx.generation_onnx import BARTBeamSearchGenerator from bart_onnx.reduce_onnx_size import remove_dup_initializers import transformers from transformers import BartForConditionalGeneration, BartTokenizer logging.basicConfig( format="""%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s""", datefmt="""%Y-%m-%d %H:%M:%S""", level=os.environ.get("""LOGLEVEL""", """INFO""").upper(), stream=sys.stdout, ) __snake_case : Any = logging.getLogger(__name__) __snake_case : Any = {"""facebook/bart-base""": BartForConditionalGeneration} __snake_case : Tuple = {"""facebook/bart-base""": BartTokenizer} def _UpperCAmelCase ( ): '''simple docstring''' a_ : List[str] = argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""") parser.add_argument( """--validation_file""" , type=a__ , default=a__ , help="""A csv or a json file containing the validation data.""") parser.add_argument( """--max_length""" , type=a__ , default=5 , help="""The maximum total input sequence length after tokenization.""" , ) parser.add_argument( """--num_beams""" , type=a__ , default=a__ , help=( """Number of beams to use for evaluation. This argument will be """ """passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.""" ) , ) parser.add_argument( """--model_name_or_path""" , type=a__ , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=a__ , ) parser.add_argument( """--config_name""" , type=a__ , default=a__ , help="""Pretrained config name or path if not the same as model_name""" , ) parser.add_argument( """--device""" , type=a__ , default="""cpu""" , help="""Device where the model will be run""" , ) parser.add_argument("""--output_file_path""" , type=a__ , default=a__ , help="""Where to store the final ONNX file.""") a_ : Any = parser.parse_args() return args def _UpperCAmelCase ( a__ , a__="cpu"): '''simple docstring''' a_ : Optional[int] = model_dict[model_name].from_pretrained(a__).to(a__) a_ : List[str] = tokenizer_dict[model_name].from_pretrained(a__) if model_name in ["facebook/bart-base"]: a_ : Tuple = 0 a_ : Optional[int] = None a_ : Union[str, Any] = 0 return huggingface_model, tokenizer def _UpperCAmelCase ( a__ , a__ , a__ , a__ , a__): '''simple docstring''' model.eval() a_ : Optional[Any] = None a_ : Optional[Any] = torch.jit.script(BARTBeamSearchGenerator(a__)) with torch.no_grad(): a_ : Any = """My friends are cool but they eat too many carbs.""" a_ : Dict = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_0_2_4 , return_tensors="""pt""").to(model.device) a_ : Optional[int] = model.generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , num_beams=a__ , max_length=a__ , early_stopping=a__ , decoder_start_token_id=model.config.decoder_start_token_id , ) torch.onnx.export( a__ , ( inputs["""input_ids"""], inputs["""attention_mask"""], num_beams, max_length, model.config.decoder_start_token_id, ) , a__ , opset_version=1_4 , input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""] , output_names=["""output_ids"""] , dynamic_axes={ """input_ids""": {0: """batch""", 1: """seq"""}, """output_ids""": {0: """batch""", 1: """seq_out"""}, } , example_outputs=a__ , ) logger.info("""Model exported to {}""".format(a__)) a_ : List[str] = remove_dup_initializers(os.path.abspath(a__)) logger.info("""Deduplicated 
and optimized model written to {}""".format(a__)) a_ : Union[str, Any] = onnxruntime.InferenceSession(a__) a_ : Any = ort_sess.run( a__ , { """input_ids""": inputs["""input_ids"""].cpu().numpy(), """attention_mask""": inputs["""attention_mask"""].cpu().numpy(), """num_beams""": np.array(a__), """max_length""": np.array(a__), """decoder_start_token_id""": np.array(model.config.decoder_start_token_id), } , ) np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3) logger.info("""Model outputs from torch and ONNX Runtime are similar.""") logger.info("""Success.""") def _UpperCAmelCase ( ): '''simple docstring''' a_ : List[str] = parse_args() a_ : str = 5 a_ : Union[str, Any] = 4 # Make one log on every process with the configuration for debugging. logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , ) logger.setLevel(logging.INFO) transformers.utils.logging.set_verbosity_error() a_ : int = torch.device(args.device) a_ , a_ : Optional[Any] = load_model_tokenizer(args.model_name_or_path , a__) if model.config.decoder_start_token_id is None: raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""") model.to(a__) if args.max_length: a_ : List[str] = args.max_length if args.num_beams: a_ : Optional[Any] = args.num_beams if args.output_file_path: a_ : Optional[int] = args.output_file_path else: a_ : Tuple = """BART.onnx""" logger.info("""Exporting model to ONNX""") export_and_validate_model(a__ , a__ , a__ , a__ , a__) if __name__ == "__main__": main()
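# Example invocation (added sketch; the script file name is assumed, while
# the flags mirror the argument parser defined above):
#
#   python run_onnx_exporter.py \
#       --model_name_or_path facebook/bart-base \
#       --max_length 5 \
#       --num_beams 4 \
#       --device cpu \
#       --output_file_path BART.onnx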
248
0
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor @require_vision class a ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase ( self ) -> Dict: _A = tempfile.mkdtemp() _A = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """的""", """价""", """格""", """是""", """15""", """便""", """alex""", """##andra""", """,""", """。""", """-""", """t""", """shirt""", ] _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) _A = { """do_resize""": True, """size""": {"""height""": 2_24, """width""": 2_24}, """do_center_crop""": True, """crop_size""": {"""height""": 18, """width""": 18}, """do_normalize""": True, """image_mean""": [0.4814_5466, 0.457_8275, 0.4082_1073], """image_std""": [0.2686_2954, 0.2613_0258, 0.2757_7711], """do_convert_rgb""": True, } _A = os.path.join(self.tmpdirname , __SCREAMING_SNAKE_CASE ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> Any: return BertTokenizer.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE ) def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> Optional[Any]: return BertTokenizerFast.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE ) def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> Tuple: return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE ) def UpperCAmelCase ( self ) -> Any: shutil.rmtree(self.tmpdirname ) def UpperCAmelCase ( self ) -> Any: _A = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] _A = [Image.fromarray(np.moveaxis(__SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs] return image_inputs def UpperCAmelCase ( self ) -> str: _A = self.get_tokenizer() _A = self.get_rust_tokenizer() _A = self.get_image_processor() _A = ChineseCLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE ) processor_slow.save_pretrained(self.tmpdirname ) _A = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE ) _A = ChineseCLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE ) processor_fast.save_pretrained(self.tmpdirname ) _A = ChineseCLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __SCREAMING_SNAKE_CASE ) self.assertIsInstance(processor_fast.tokenizer , __SCREAMING_SNAKE_CASE ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , 
__SCREAMING_SNAKE_CASE ) self.assertIsInstance(processor_fast.image_processor , __SCREAMING_SNAKE_CASE ) def UpperCAmelCase ( self ) -> Optional[int]: _A = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _A = self.get_tokenizer(cls_token="""(CLS)""" , sep_token="""(SEP)""" ) _A = self.get_image_processor(do_normalize=__SCREAMING_SNAKE_CASE ) _A = ChineseCLIPProcessor.from_pretrained( self.tmpdirname , cls_token="""(CLS)""" , sep_token="""(SEP)""" , do_normalize=__SCREAMING_SNAKE_CASE ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __SCREAMING_SNAKE_CASE ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE ) def UpperCAmelCase ( self ) -> List[str]: _A = self.get_image_processor() _A = self.get_tokenizer() _A = ChineseCLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE ) _A = self.prepare_image_inputs() _A = image_processor(__SCREAMING_SNAKE_CASE , return_tensors="""np""" ) _A = processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def UpperCAmelCase ( self ) -> Tuple: _A = self.get_image_processor() _A = self.get_tokenizer() _A = ChineseCLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE ) _A = """Alexandra,T-shirt的价格是15便士。""" _A = processor(text=__SCREAMING_SNAKE_CASE ) _A = tokenizer(__SCREAMING_SNAKE_CASE ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def UpperCAmelCase ( self ) -> List[str]: _A = self.get_image_processor() _A = self.get_tokenizer() _A = ChineseCLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE ) _A = """Alexandra,T-shirt的价格是15便士。""" _A = self.prepare_image_inputs() _A = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(__SCREAMING_SNAKE_CASE ): processor() def UpperCAmelCase ( self ) -> Tuple: _A = self.get_image_processor() _A = self.get_tokenizer() _A = ChineseCLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE ) _A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _A = processor.batch_decode(__SCREAMING_SNAKE_CASE ) _A = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def UpperCAmelCase ( self ) -> int: _A = self.get_image_processor() _A = self.get_tokenizer() _A = ChineseCLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE ) _A = """Alexandra,T-shirt的价格是15便士。""" _A = self.prepare_image_inputs() _A = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
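# Stand-alone sketch of the processor contract verified above (added; the
# tokenizer and image processor stand for the checkpoint-free fixtures built
# in setUp, so every object here is illustrative):
#
#   processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
#   inputs = processor(text="Alexandra,T-shirt的价格是15便士。", images=image, return_tensors="np")
#   list(inputs.keys())  # ["input_ids", "token_type_ids", "attention_mask", "pixel_values"]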
363
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
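# Added illustration of what the sys.modules swap above achieves: replacing
# the module object with a _LazyModule defers the heavy torch import until a
# name is first accessed. A minimal stand-in for the same __getattr__ trick
# (the class below is illustrative, not transformers' actual _LazyModule):
import importlib
import types


class _LazyDemo(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Import the owning submodule only when one of its names is requested.
        for module_name, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f".{module_name}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")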
81
0
"""simple docstring""" from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class __UpperCamelCase : lowerCamelCase : Any =XGLMConfig lowerCamelCase : List[Any] ={} lowerCamelCase : Optional[int] ="gelu" def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=14 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=99 , lowerCAmelCase__=32 , lowerCAmelCase__=2 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=512 , lowerCAmelCase__=0.02 , ) -> str: a : Optional[int] = parent a : int = batch_size a : Optional[Any] = seq_length a : Any = is_training a : Optional[int] = use_input_mask a : str = use_labels a : Any = vocab_size a : Optional[int] = d_model a : int = num_hidden_layers a : Union[str, Any] = num_attention_heads a : Union[str, Any] = ffn_dim a : Any = activation_function a : Tuple = activation_dropout a : int = attention_dropout a : Optional[int] = max_position_embeddings a : str = initializer_range a : List[Any] = None a : Tuple = 0 a : str = 2 a : Optional[int] = 1 def __a ( self ) -> Any: return XGLMConfig.from_pretrained("facebook/xglm-564M" ) def __a ( self ) -> Optional[int]: a : Dict = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) a : Tuple = None if self.use_input_mask: a : Dict = random_attention_mask([self.batch_size, self.seq_length] ) a : int = self.get_config() a : int = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def __a ( self ) -> Union[str, Any]: return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=a__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=a__ , ) def __a ( self ) -> Optional[int]: a : Tuple = self.prepare_config_and_inputs() ( a ) : List[str] = config_and_inputs a : Tuple = { """input_ids""": input_ids, """head_mask""": head_mask, } return config, inputs_dict @require_tf class __UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): lowerCamelCase : Optional[Any] =(TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () lowerCamelCase : List[str] =(TFXGLMForCausalLM,) if is_tf_available() else () lowerCamelCase : Optional[Any] =( {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {} ) lowerCamelCase : Any =False lowerCamelCase : int =False lowerCamelCase : Union[str, Any] =False def __a ( self ) -> int: a : List[str] = 
TFXGLMModelTester(self ) a : Optional[Any] = ConfigTester(self , config_class=a__ , n_embd=37 ) def __a ( self ) -> Union[str, Any]: self.config_tester.run_common_tests() @slow def __a ( self ) -> int: for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a : Any = TFXGLMModel.from_pretrained(a__ ) self.assertIsNotNone(a__ ) @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor." ) def __a ( self ) -> int: super().test_resize_token_embeddings() @require_tf class __UpperCamelCase ( unittest.TestCase ): @slow def __a ( self , lowerCAmelCase__=True ) -> Optional[int]: a : List[str] = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) a : Tuple = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off a : Any = [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581] # fmt: on a : str = model.generate(a__ , do_sample=a__ , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , a__ ) @slow def __a ( self ) -> int: a : str = XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) a : Tuple = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) tf.random.set_seed(0 ) a : List[Any] = tokenizer("Today is a nice day and" , return_tensors="tf" ) a : Optional[Any] = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(":/CPU:0" ): a : List[str] = model.generate(a__ , do_sample=a__ , seed=[7, 0] ) a : int = tokenizer.decode(output_ids[0] , skip_special_tokens=a__ ) a : Union[str, Any] = ( """Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due""" ) self.assertEqual(a__ , a__ ) @slow def __a ( self ) -> Any: a : Union[str, Any] = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) a : List[Any] = XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) a : Optional[Any] = """left""" # use different length sentences to test batching a : List[Any] = [ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. When""", """Hello, my dog is a little""", ] a : int = tokenizer(a__ , return_tensors="tf" , padding=a__ ) a : Optional[int] = inputs["""input_ids"""] a : List[Any] = model.generate(input_ids=a__ , attention_mask=inputs["attention_mask"] , max_new_tokens=12 ) a : Any = tokenizer(sentences[0] , return_tensors="tf" ).input_ids a : List[Any] = model.generate(input_ids=a__ , max_new_tokens=12 ) a : str = tokenizer(sentences[1] , return_tensors="tf" ).input_ids a : Optional[Any] = model.generate(input_ids=a__ , max_new_tokens=12 ) a : Any = tokenizer.batch_decode(a__ , skip_special_tokens=a__ ) a : Union[str, Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=a__ ) a : Optional[int] = tokenizer.decode(output_padded[0] , skip_special_tokens=a__ ) a : Dict = [ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. 
When left padding is applied, the sequence will be """ """a single""", """Hello, my dog is a little bit of a shy one, but he is very friendly""", ] self.assertListEqual(a__ , a__ ) self.assertListEqual(a__ , [non_padded_sentence, padded_sentence] )
105
"""simple docstring""" from scipy.stats import pearsonr import datasets _a : str = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n' _a : List[str] = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n' _a : List[Any] = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. 
and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __A ( datasets.Metric ): def __A ( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""float""" ), """references""": datasets.Value("""float""" ), } ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , ) def __A ( self , a__ , a__ , a__=False ): if return_pvalue: _lowerCAmelCase : List[Any] = pearsonr(a__ , a__ ) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(a__ , a__ )[0] )}
44
0
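A quick standalone check of the scipy call the metric above wraps; the inputs mirror the docstring example, so the rounded outputs should match it:

from scipy.stats import pearsonr

r, p = pearsonr([10, 9, 2.5, 6, 4], [1, 2, 3, 4, 5])
print(round(r, 2))  # -0.74, a fairly strong negative linear relationship
print(round(p, 2))  # 0.15, probability of a correlation at least this extreme under the null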
from ...configuration_utils import PretrainedConfig class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ): '''simple docstring''' _lowercase : Tuple = '''bert-generation''' def __init__( self , _lowercase=50_358 , _lowercase=1_024 , _lowercase=24 , _lowercase=16 , _lowercase=4_096 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=0.02 , _lowercase=1e-12 , _lowercase=0 , _lowercase=2 , _lowercase=1 , _lowercase="absolute" , _lowercase=True , **_lowercase , ): """simple docstring""" super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase ) _lowerCAmelCase = vocab_size _lowerCAmelCase = hidden_size _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = hidden_act _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = max_position_embeddings _lowerCAmelCase = initializer_range _lowerCAmelCase = layer_norm_eps _lowerCAmelCase = position_embedding_type _lowerCAmelCase = use_cache
362
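A short usage sketch for a config class like the one above, assuming it is exposed as `BertGenerationConfig` in transformers (the keyword defaults here mirror the `__init__` signature in the row):

from transformers import BertGenerationConfig

# Defaults come from the signature above; any keyword argument overrides them.
config = BertGenerationConfig(num_hidden_layers=12)
print(config.model_type)         # "bert-generation"
print(config.vocab_size)         # 50358, the first default above
print(config.num_hidden_layers)  # 12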
'''simple docstring''' from typing import Callable, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin, TransformeraDModel, VQModel from ...schedulers import VQDiffusionScheduler from ...utils import logging from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput _lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): '''simple docstring''' @register_to_config def __init__( self , _lowercase , _lowercase = None , _lowercase = None ): """simple docstring""" super().__init__() _lowerCAmelCase = learnable if self.learnable: assert hidden_size is not None, "learnable=True requires `hidden_size` to be set" assert length is not None, "learnable=True requires `length` to be set" _lowerCAmelCase = torch.zeros(_lowercase , _lowercase ) else: _lowerCAmelCase = None _lowerCAmelCase = torch.nn.Parameter(_lowercase ) class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ): '''simple docstring''' _lowercase : VQModel _lowercase : CLIPTextModel _lowercase : CLIPTokenizer _lowercase : TransformeraDModel _lowercase : LearnedClassifierFreeSamplingEmbeddings _lowercase : VQDiffusionScheduler def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ): """simple docstring""" super().__init__() self.register_modules( vqvae=_lowercase , transformer=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , ) def _lowercase ( self , _lowercase , _lowercase , _lowercase ): """simple docstring""" _lowerCAmelCase = len(_lowercase ) if isinstance(_lowercase , _lowercase ) else 1 # get prompt text embeddings _lowerCAmelCase = self.tokenizer( _lowercase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , ) _lowerCAmelCase = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: _lowerCAmelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( """The following part of your input was truncated because CLIP can only handle sequences up to""" F' {self.tokenizer.model_max_length} tokens: {removed_text}' ) _lowerCAmelCase = text_input_ids[:, : self.tokenizer.model_max_length] _lowerCAmelCase = self.text_encoder(text_input_ids.to(self.device ) )[0] # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion. # While CLIP does normalize the pooled output of the text transformer when combining # the image and text embeddings, CLIP does not directly normalize the last hidden state. # # CLIP normalizing the pooled output. 
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053 _lowerCAmelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=_lowercase ) # duplicate text embeddings for each generation per prompt _lowerCAmelCase = prompt_embeds.repeat_interleave(_lowercase , dim=0 ) if do_classifier_free_guidance: if self.learned_classifier_free_sampling_embeddings.learnable: _lowerCAmelCase = self.learned_classifier_free_sampling_embeddings.embeddings _lowerCAmelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(_lowercase , 1 , 1 ) else: _lowerCAmelCase = [""""""] * batch_size _lowerCAmelCase = text_input_ids.shape[-1] _lowerCAmelCase = self.tokenizer( _lowercase , padding="""max_length""" , max_length=_lowercase , truncation=_lowercase , return_tensors="""pt""" , ) _lowerCAmelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # See comment for normalizing text embeddings _lowerCAmelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=_lowercase ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method _lowerCAmelCase = negative_prompt_embeds.shape[1] _lowerCAmelCase = negative_prompt_embeds.repeat(1 , _lowercase , 1 ) _lowerCAmelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _lowercase , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _lowerCAmelCase = torch.cat([negative_prompt_embeds, prompt_embeds] ) return prompt_embeds @torch.no_grad() def __call__( self , _lowercase , _lowercase = 100 , _lowercase = 5.0 , _lowercase = 1.0 , _lowercase = 1 , _lowercase = None , _lowercase = None , _lowercase = "pil" , _lowercase = True , _lowercase = None , _lowercase = 1 , ): """simple docstring""" if isinstance(_lowercase , _lowercase ): _lowerCAmelCase = 1 elif isinstance(_lowercase , _lowercase ): _lowerCAmelCase = len(_lowercase ) else: raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(_lowercase )}' ) _lowerCAmelCase = batch_size * num_images_per_prompt _lowerCAmelCase = guidance_scale > 1.0 _lowerCAmelCase = self._encode_prompt(_lowercase , _lowercase , _lowercase ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(_lowercase , _lowercase ) or callback_steps <= 0) ): raise ValueError( F'`callback_steps` has to be a positive integer but is {callback_steps} of type' F' {type(_lowercase )}.' ) # get the initial completely masked latents unless the user supplied it _lowerCAmelCase = (batch_size, self.transformer.num_latent_pixels) if latents is None: _lowerCAmelCase = self.transformer.num_vector_embeds - 1 _lowerCAmelCase = torch.full(_lowercase , _lowercase ).to(self.device ) else: if latents.shape != latents_shape: raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' ) if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any(): raise ValueError( """Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,""" F' {self.transformer.num_vector_embeds - 1} (inclusive).'
) _lowerCAmelCase = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(_lowercase , device=self.device ) _lowerCAmelCase = self.scheduler.timesteps.to(self.device ) _lowerCAmelCase = latents for i, t in enumerate(self.progress_bar(_lowercase ) ): # expand the sample if we are doing classifier free guidance _lowerCAmelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample # predict the un-noised image # model_output == `log_p_x_0` _lowerCAmelCase = self.transformer(_lowercase , encoder_hidden_states=_lowercase , timestep=_lowercase ).sample if do_classifier_free_guidance: _lowerCAmelCase , _lowerCAmelCase = model_output.chunk(2 ) _lowerCAmelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) model_output -= torch.logsumexp(_lowercase , dim=1 , keepdim=_lowercase ) _lowerCAmelCase = self.truncate(_lowercase , _lowercase ) # remove `log(0)`'s (`-inf`s) _lowerCAmelCase = model_output.clamp(-70 ) # compute the previous noisy sample x_t -> x_t-1 _lowerCAmelCase = self.scheduler.step(_lowercase , timestep=_lowercase , sample=_lowercase , generator=_lowercase ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(_lowercase , _lowercase , _lowercase ) _lowerCAmelCase = self.vqvae.config.vq_embed_dim _lowerCAmelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels) _lowerCAmelCase = self.vqvae.quantize.get_codebook_entry(_lowercase , shape=_lowercase ) _lowerCAmelCase = self.vqvae.decode(_lowercase , force_not_quantize=_lowercase ).sample _lowerCAmelCase = (image / 2 + 0.5).clamp(0 , 1 ) _lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _lowerCAmelCase = self.numpy_to_pil(_lowercase ) if not return_dict: return (image,) return ImagePipelineOutput(images=_lowercase ) def _lowercase ( self , _lowercase , _lowercase ): """simple docstring""" _lowerCAmelCase , _lowerCAmelCase = torch.sort(_lowercase , 1 , descending=_lowercase ) _lowerCAmelCase = torch.exp(_lowercase ) _lowerCAmelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate # Ensure that at least the largest probability is not zeroed out _lowerCAmelCase = torch.full_like(keep_mask[:, 0:1, :] , _lowercase ) _lowerCAmelCase = torch.cat((all_true, keep_mask) , dim=1 ) _lowerCAmelCase = keep_mask[:, :-1, :] _lowerCAmelCase = keep_mask.gather(1 , indices.argsort(1 ) ) _lowerCAmelCase = log_p_x_0.clone() _lowerCAmelCase = -torch.inf # -inf = log(0) return rv
229
0
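The `truncate` step above keeps only the most probable codebook entries until their cumulative probability reaches `truncation_rate`, zeroing the rest in log space. A self-contained torch sketch of that masking logic on one toy distribution (the 0.8 threshold and the probabilities are arbitrary):

import torch

log_p = torch.log(torch.tensor([[0.5, 0.3, 0.15, 0.05]]))  # (batch, classes)
truncation_rate = 0.8

sorted_log_p, indices = torch.sort(log_p, dim=1, descending=True)
keep = torch.exp(sorted_log_p).cumsum(dim=1) < truncation_rate
# Always keep the single most probable entry, then shift the mask by one
# so the entry that crosses the threshold is still included.
keep = torch.cat([torch.ones_like(keep[:, :1]), keep[:, :-1]], dim=1)
keep = keep.gather(1, indices.argsort(1))  # undo the sort, back to original order

out = log_p.clone()
out[~keep] = -torch.inf  # -inf = log(0), as in the pipeline above
print(out)  # probabilities 0.5 and 0.3 survive; the tail is masked out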
'''simple docstring''' import json import os from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType from ...utils.imports import is_botoa_available from .config_args import SageMakerConfig from .config_utils import ( DYNAMO_BACKENDS, _ask_field, _ask_options, _convert_dynamo_backend, _convert_mixed_precision, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool, ) if is_botoa_available(): import botoa # noqa: F401 def _UpperCamelCase ( UpperCamelCase__ ): UpperCAmelCase__ : Optional[Any] = botoa.client("""iam""" ) UpperCAmelCase__ : Any = { """Version""": """2012-10-17""", """Statement""": [ {"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""} ], } try: # create the role, associated with the chosen trust policy iam_client.create_role( RoleName=UpperCamelCase__ , AssumeRolePolicyDocument=json.dumps(UpperCamelCase__ , indent=2 ) ) UpperCAmelCase__ : Optional[int] = { """Version""": """2012-10-17""", """Statement""": [ { """Effect""": """Allow""", """Action""": [ """sagemaker:*""", """ecr:GetDownloadUrlForLayer""", """ecr:BatchGetImage""", """ecr:BatchCheckLayerAvailability""", """ecr:GetAuthorizationToken""", """cloudwatch:PutMetricData""", """cloudwatch:GetMetricData""", """cloudwatch:GetMetricStatistics""", """cloudwatch:ListMetrics""", """logs:CreateLogGroup""", """logs:CreateLogStream""", """logs:DescribeLogStreams""", """logs:PutLogEvents""", """logs:GetLogEvents""", """s3:CreateBucket""", """s3:ListBucket""", """s3:GetBucketLocation""", """s3:GetObject""", """s3:PutObject""", ], """Resource""": """*""", } ], } # attach policy to role iam_client.put_role_policy( RoleName=UpperCamelCase__ , PolicyName=f'''{role_name}_policy_permission''' , PolicyDocument=json.dumps(UpperCamelCase__ , indent=2 ) , ) except iam_client.exceptions.EntityAlreadyExistsException: print(f'''role {role_name} already exists. 
Using existing one''' ) def _UpperCamelCase ( UpperCamelCase__ ): UpperCAmelCase__ : List[str] = botoa.client("""iam""" ) return iam_client.get_role(RoleName=UpperCamelCase__ )["Role"]["Arn"] def _UpperCamelCase ( ): UpperCAmelCase__ : List[str] = _ask_options( """How do you want to authorize?""" , ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """] , UpperCamelCase__ , ) UpperCAmelCase__ : Any = None if credentials_configuration == 0: UpperCAmelCase__ : List[str] = _ask_field("""Enter your AWS Profile name: [default] """ , default="""default""" ) UpperCAmelCase__ : Any = aws_profile else: print( """Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch your training script with """ """`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" ) UpperCAmelCase__ : Any = _ask_field("""AWS Access Key ID: """ ) UpperCAmelCase__ : Dict = aws_access_key_id UpperCAmelCase__ : Dict = _ask_field("""AWS Secret Access Key: """ ) UpperCAmelCase__ : Tuple = aws_secret_access_key UpperCAmelCase__ : int = _ask_field("""Enter your AWS Region: [us-east-1]""" , default="""us-east-1""" ) UpperCAmelCase__ : List[str] = aws_region UpperCAmelCase__ : List[str] = _ask_options( """Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""" , ["""Provide IAM Role name""", """Create new IAM role using credentials"""] , UpperCamelCase__ , ) if role_management == 0: UpperCAmelCase__ : int = _ask_field("""Enter your IAM role name: """ ) else: UpperCAmelCase__ : int = """accelerate_sagemaker_execution_role""" print(f'''Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials''' ) _create_iam_role_for_sagemaker(UpperCamelCase__ ) UpperCAmelCase__ : List[Any] = _ask_field( """Do you want to use a custom Docker image? [yes/NO]: """ , _convert_yes_no_to_bool , default=UpperCamelCase__ , error_message="""Please enter yes or no.""" , ) UpperCAmelCase__ : Optional[int] = None if is_custom_docker_image: UpperCAmelCase__ : int = _ask_field("""Enter your Docker image: """ , lambda UpperCamelCase__ : str(UpperCamelCase__ ).lower() ) UpperCAmelCase__ : str = _ask_field( """Do you want to provide SageMaker input channels with data locations? [yes/NO]: """ , _convert_yes_no_to_bool , default=UpperCamelCase__ , error_message="""Please enter yes or no.""" , ) UpperCAmelCase__ : List[Any] = None if is_sagemaker_inputs_enabled: UpperCAmelCase__ : Optional[int] = _ask_field( """Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """ , lambda UpperCamelCase__ : str(UpperCamelCase__ ).lower() , ) UpperCAmelCase__ : Union[str, Any] = _ask_field( """Do you want to enable SageMaker metrics?
[yes/NO]: """ , _convert_yes_no_to_bool , default=UpperCamelCase__ , error_message="""Please enter yes or no.""" , ) UpperCAmelCase__ : Dict = None if is_sagemaker_metrics_enabled: UpperCAmelCase__ : Any = _ask_field( """Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """ , lambda UpperCamelCase__ : str(UpperCamelCase__ ).lower() , ) UpperCAmelCase__ : List[str] = _ask_options( """What is the distributed mode?""" , ["""No distributed training""", """Data parallelism"""] , _convert_sagemaker_distributed_mode , ) UpperCAmelCase__ : str = {} UpperCAmelCase__ : str = _ask_field( """Do you wish to optimize your script with torch dynamo? [yes/NO]: """ , _convert_yes_no_to_bool , default=UpperCamelCase__ , error_message="""Please enter yes or no.""" , ) if use_dynamo: UpperCAmelCase__ : Tuple = """dynamo_""" UpperCAmelCase__ : Union[str, Any] = _ask_options( """Which dynamo backend would you like to use?""" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , ) UpperCAmelCase__ : Any = _ask_field( """Do you want to customize the defaults sent to torch.compile? [yes/NO]: """ , _convert_yes_no_to_bool , default=UpperCamelCase__ , error_message="""Please enter yes or no.""" , ) if use_custom_options: UpperCAmelCase__ : Dict = _ask_options( """Which mode do you want to use?""" , UpperCamelCase__ , lambda UpperCamelCase__ : TORCH_DYNAMO_MODES[int(UpperCamelCase__ )] , default="""default""" , ) UpperCAmelCase__ : int = _ask_field( """Do you want the fullgraph mode or is it ok to break the model into several subgraphs? [yes/NO]: """ , _convert_yes_no_to_bool , default=UpperCamelCase__ , error_message="""Please enter yes or no.""" , ) UpperCAmelCase__ : Any = _ask_field( """Do you want to enable dynamic shape tracing? [yes/NO]: """ , _convert_yes_no_to_bool , default=UpperCamelCase__ , error_message="""Please enter yes or no.""" , ) UpperCAmelCase__ : Any = """Which EC2 instance type do you want to use for your training?""" if distributed_type != SageMakerDistributedType.NO: UpperCAmelCase__ : str = _ask_options( UpperCamelCase__ , UpperCamelCase__ , lambda UpperCamelCase__ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(UpperCamelCase__ )] ) else: eca_instance_query += " [ml.p3.2xlarge]:" UpperCAmelCase__ : List[str] = _ask_field(UpperCamelCase__ , lambda UpperCamelCase__ : str(UpperCamelCase__ ).lower() , default="""ml.p3.2xlarge""" ) UpperCAmelCase__ : str = 1 if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL): UpperCAmelCase__ : Optional[Any] = _ask_field( """How many machines do you want to use? [1]: """ , UpperCamelCase__ , default=1 , ) UpperCAmelCase__ : Optional[Any] = _ask_options( """Do you wish to use FP16 or BF16 (mixed precision)?""" , ["""no""", """fp16""", """bf16""", """fp8"""] , _convert_mixed_precision , ) if use_dynamo and mixed_precision == "no": print( """Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" ) return SageMakerConfig( image_uri=UpperCamelCase__ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=UpperCamelCase__ , use_cpu=UpperCamelCase__ , dynamo_config=UpperCamelCase__ , eca_instance_type=UpperCamelCase__ , profile=UpperCamelCase__ , region=UpperCamelCase__ , iam_role_name=UpperCamelCase__ , mixed_precision=UpperCamelCase__ , num_machines=UpperCamelCase__ , sagemaker_inputs_file=UpperCamelCase__ , sagemaker_metrics_file=UpperCamelCase__ , )
163
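The role creation above hinges on the trust policy that lets the SageMaker service assume the role, and boto3's `create_role` expects it serialized as a JSON string rather than a dict. A credential-free sketch of just that part (the role name in the comment is the default used above):

import json

trust_policy = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Principal": {"Service": "sagemaker.amazonaws.com"},
            "Action": "sts:AssumeRole",
        }
    ],
}
# The actual call (requires AWS credentials) would look roughly like:
#   boto3.client("iam").create_role(
#       RoleName="accelerate_sagemaker_execution_role",
#       AssumeRolePolicyDocument=json.dumps(trust_policy, indent=2),
#   )
print(json.dumps(trust_policy, indent=2))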
'''simple docstring''' import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class __UpperCAmelCase ( A__ , unittest.TestCase ): '''simple docstring''' __lowerCAmelCase = BioGptTokenizer __lowerCAmelCase = False def A (self : int ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """w</w>""", """r</w>""", """t</w>""", """lo""", """low""", """er</w>""", """low</w>""", """lowest</w>""", """newer</w>""", """wider</w>""", """<unk>""", ] A = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) ) A = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""] A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" ) as fp: fp.write(json.dumps(_lowerCAmelCase ) ) with open(self.merges_file , """w""" ) as fp: fp.write("""\n""".join(_lowerCAmelCase ) ) def A (self : Tuple , _lowerCAmelCase : List[str] ): A = """lower newer""" A = """lower newer""" return input_text, output_text def A (self : List[Any] ): A = BioGptTokenizer(self.vocab_file , self.merges_file ) A = """lower""" A = ["""low""", """er</w>"""] A = tokenizer.tokenize(_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase ) A = tokens + ["""<unk>"""] A = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , _lowerCAmelCase ) @slow def A (self : Union[str, Any] ): A = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" ) A = tokenizer.encode("""sequence builders""" , add_special_tokens=_lowerCAmelCase ) A = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_lowerCAmelCase ) A = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase ) A = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase , _lowerCAmelCase ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
258
0
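The fixture above expects "lower" to split into ['low', 'er</w>'] because the merge ranks in the toy merges file are applied greedily, lowest rank first. A small pure-Python sketch of that merging loop (simplified, not the real tokenizer):

# Merge table from the fixture; the ranks are the trailing numbers in the
# merges lines above, and lower rank means the merge is applied earlier.
merges = {("l", "o"): 123, ("lo", "w"): 1456, ("e", "r</w>"): 1789}

def bpe(word):
    # Start from single characters, with an end-of-word marker on the last one.
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    while True:
        pairs = [
            (merges[p], i)
            for i, p in enumerate(zip(symbols, symbols[1:]))
            if p in merges
        ]
        if not pairs:
            return symbols
        _, i = min(pairs)  # best-ranked adjacent pair merges first
        symbols[i : i + 2] = [symbols[i] + symbols[i + 1]]

print(bpe("lower"))  # ['low', 'er</w>'], matching the test expectation above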
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: a = None a = logging.get_logger(__name__) a = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} a = { '''vocab_file''': { '''facebook/mbart-large-en-ro''': ( '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model''' ), '''facebook/mbart-large-cc25''': ( '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''', '''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''', }, } a = { '''facebook/mbart-large-en-ro''': 1_024, '''facebook/mbart-large-cc25''': 1_024, } # fmt: off a = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN'''] class lowercase_ ( a__ ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = VOCAB_FILES_NAMES UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase : Any = ['input_ids', 'attention_mask'] UpperCAmelCase : Tuple = MBartTokenizer UpperCAmelCase : List[int] = [] UpperCAmelCase : List[int] = [] def __init__( self : Tuple , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : str=None , _UpperCAmelCase : List[str]="<s>" , _UpperCAmelCase : Dict="</s>" , _UpperCAmelCase : str="</s>" , _UpperCAmelCase : List[str]="<s>" , _UpperCAmelCase : int="<unk>" , _UpperCAmelCase : Union[str, Any]="<pad>" , _UpperCAmelCase : Dict="<mask>" , _UpperCAmelCase : Any=None , _UpperCAmelCase : Any=None , _UpperCAmelCase : Optional[Any]=None , **_UpperCAmelCase : List[Any] , ): # Mask token behave like a normal word, i.e. include the space before it _A = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else mask_token super().__init__( vocab_file=_lowerCamelCase , tokenizer_file=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , src_lang=_lowerCamelCase , tgt_lang=_lowerCamelCase , additional_special_tokens=_lowerCamelCase , **_lowerCamelCase , ) _A = vocab_file _A = False if not self.vocab_file else True _A = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} ) _A = { lang_code: self.convert_tokens_to_ids(_lowerCamelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES } _A = src_lang if src_lang is not None else '''en_XX''' _A = self.convert_tokens_to_ids(self._src_lang ) _A = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def lowerCAmelCase_ ( self : int ): return self._src_lang @src_lang.setter def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Optional[int] ): _A = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : str = None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def lowerCAmelCase_ ( self : str , _UpperCAmelCase : str , _UpperCAmelCase : Tuple = None ): _A = [self.sep_token_id] _A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , **_UpperCAmelCase : Any ): if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) _A = src_lang _A = self(_lowerCamelCase , add_special_tokens=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase ) _A = self.convert_tokens_to_ids(_lowerCamelCase ) _A = tgt_lang_id return inputs def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] = "en_XX" , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : int = "ro_RO" , **_UpperCAmelCase : List[str] , ): _A = src_lang _A = tgt_lang return super().prepare_seqaseq_batch(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ) def lowerCAmelCase_ ( self : List[str] ): return self.set_src_lang_special_tokens(self.src_lang ) def lowerCAmelCase_ ( self : Optional[int] ): return self.set_tgt_lang_special_tokens(self.tgt_lang ) def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : int ): _A = self.convert_tokens_to_ids(_lowerCamelCase ) _A = [] _A = [self.eos_token_id, self.cur_lang_code] _A = self.convert_ids_to_tokens(self.prefix_tokens ) _A = self.convert_ids_to_tokens(self.suffix_tokens ) _A = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Any ): _A = self.convert_tokens_to_ids(_lowerCamelCase ) _A = [] _A = [self.eos_token_id, self.cur_lang_code] _A = self.convert_ids_to_tokens(self.prefix_tokens ) _A = self.convert_ids_to_tokens(self.suffix_tokens ) _A = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Dict , _UpperCAmelCase 
: Optional[int] = None ): if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(_lowerCamelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory.''' ) return _A = os.path.join( _lowerCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ): copyfile(self.vocab_file , _lowerCamelCase ) return (out_vocab_file,)
369
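For MBart the fast tokenizer's post-processor (wired up by the `set_*_lang_special_tokens` methods above) appends `</s>` plus the language code after the text, with an empty prefix. A toy sketch of the resulting layout; all ids here are made up for illustration:

eos_id, en_xx_id, ro_ro_id = 2, 250004, 250020  # made-up ids
src_tokens = [47, 11, 903]                       # pretend-encoded English text

prefix, suffix = [], [eos_id, en_xx_id]          # mirrors set_src_lang_special_tokens
model_input = prefix + src_tokens + suffix
print(model_input)                               # [47, 11, 903, 2, 250004]

# The target side applies the same pattern with the target language code.
labels = [99, 7] + [eos_id, ro_ro_id]
print(labels)                                    # [99, 7, 2, 250020]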
"""simple docstring""" import logging from dataclasses import dataclass, field from pathlib import Path from typing import Optional, Union from .generation.configuration_utils import GenerationConfig from .training_args import TrainingArguments from .utils import add_start_docstrings a = logging.getLogger(__name__) @dataclass @add_start_docstrings(TrainingArguments.__doc__ ) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : bool = field(default=__lowerCAmelCase , metadata={'''help''': '''Whether to use SortishSampler or not.'''} ) UpperCAmelCase : bool = field( default=__lowerCAmelCase , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} ) UpperCAmelCase : Optional[int] = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default ''' '''to the `max_length` value of the model configuration.''' ) } , ) UpperCAmelCase : Optional[int] = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default ''' '''to the `num_beams` value of the model configuration.''' ) } , ) UpperCAmelCase : Optional[Union[str, Path, GenerationConfig]] = field( default=__lowerCAmelCase , metadata={ '''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.''' } , ) def lowerCAmelCase_ ( self : int ): _A = super().to_dict() for k, v in d.items(): if isinstance(_UpperCAmelCase , _UpperCAmelCase ): _A = v.to_dict() return d
271
0
from __future__ import annotations def A ( _lowercase ): SCREAMING_SNAKE_CASE : Union[str, Any] = len(_UpperCAmelCase ) // 2 # choose the middle 3 elements SCREAMING_SNAKE_CASE : int = lst[m - 1 : m + 2] # if middle element is peak if three[1] > three[0] and three[1] > three[2]: return three[1] # if increasing, recurse on right elif three[0] < three[2]: if len(lst[:m] ) == 2: m -= 1 return peak(lst[m:] ) # decreasing else: if len(lst[:m] ) == 2: m += 1 return peak(lst[:m] ) if __name__ == "__main__": import doctest doctest.testmod()
182
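A readable restatement and a quick run of the divide-and-conquer peak finder above; it assumes the list first strictly increases and then strictly decreases, and halves the search range on each call:

def peak(lst):
    # Same algorithm as above, with readable names: inspect the middle
    # three elements and recurse into the rising side.
    m = len(lst) // 2
    three = lst[m - 1 : m + 2]
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    elif three[0] < three[2]:          # still increasing: peak is to the right
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    else:                              # decreasing: peak is to the left
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])

print(peak([1, 5, 9, 12, 7, 3]))  # 12, found in O(log n) comparisons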
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCamelCase : Any = { 'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'], 'processing_git': ['GitProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Dict = [ 'GIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'GitForCausalLM', 'GitModel', 'GitPreTrainedModel', 'GitVisionModel', ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys _lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
167
0
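The `_LazyModule` indirection above delays importing torch-heavy submodules until a symbol is first accessed. A minimal standalone sketch of the same idea via PEP 562's module-level `__getattr__`; the package and submodule names are placeholders, and this is not the transformers implementation:

# lazy_pkg/__init__.py (hypothetical package)
import importlib

_lazy_targets = {
    "GitModel": ".modeling_git",        # attribute -> submodule defining it
    "GitConfig": ".configuration_git",
}

def __getattr__(name):
    # Only runs when normal attribute lookup fails, so the heavy submodule
    # is imported the first time e.g. `lazy_pkg.GitModel` is touched.
    if name in _lazy_targets:
        module = importlib.import_module(_lazy_targets[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")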
"""simple docstring""" def _snake_case ( SCREAMING_SNAKE_CASE__ : int = 100 ) -> int: '''simple docstring''' _UpperCAmelCase : int = 0 _UpperCAmelCase : Optional[Any] = 0 for i in range(1 , n + 1 ): sum_of_squares += i**2 sum_of_ints += i return sum_of_ints**2 - sum_of_squares if __name__ == "__main__": print(F"{solution() = }")
369
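The loop above can be replaced with the closed forms sum(i) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6; a short check that the O(1) version reproduces the known Project Euler answer for n = 100:

def solution_closed_form(n: int = 100) -> int:
    sum_of_ints = n * (n + 1) // 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_of_ints**2 - sum_of_squares

print(solution_closed_form(100))  # 25164150, matching the loop version above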
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowerCAmelCase : Union[str, Any] = { "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"], "tokenization_roformer": ["RoFormerTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Any = ["RoFormerTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Any = [ "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "RoFormerForCausalLM", "RoFormerForMaskedLM", "RoFormerForMultipleChoice", "RoFormerForQuestionAnswering", "RoFormerForSequenceClassification", "RoFormerForTokenClassification", "RoFormerLayer", "RoFormerModel", "RoFormerPreTrainedModel", "load_tf_weights_in_roformer", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Optional[int] = [ "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRoFormerForCausalLM", "TFRoFormerForMaskedLM", "TFRoFormerForMultipleChoice", "TFRoFormerForQuestionAnswering", "TFRoFormerForSequenceClassification", "TFRoFormerForTokenClassification", "TFRoFormerLayer", "TFRoFormerModel", "TFRoFormerPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : List[str] = [ "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "FlaxRoFormerForMaskedLM", "FlaxRoFormerForMultipleChoice", "FlaxRoFormerForQuestionAnswering", "FlaxRoFormerForSequenceClassification", "FlaxRoFormerForTokenClassification", "FlaxRoFormerModel", "FlaxRoFormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig from .tokenization_roformer import RoFormerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roformer_fast import RoFormerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roformer import ( TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roformer import ( FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, 
FlaxRoFormerForTokenClassification, FlaxRoFormerModel, FlaxRoFormerPreTrainedModel, ) else: import sys _lowerCAmelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
202
0
"""simple docstring""" from math import factorial __A = {str(digit): factorial(digit) for digit in range(1_0)} def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> int: if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise TypeError('''Parameter number must be int''' ) if number < 0: raise ValueError('''Parameter number must be greater than or equal to 0''' ) # Converts number in string to iterate on its digits and adds its factorial. return sum(DIGIT_FACTORIAL[digit] for digit in str(__UpperCAmelCase ) ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = 6_0 , __UpperCAmelCase = 1_0_0_0_0_0_0 ) -> int: if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise TypeError('''Parameters chain_length and number_limit must be int''' ) if chain_length <= 0 or number_limit <= 0: raise ValueError( '''Parameters chain_length and number_limit must be greater than 0''' ) # the counter for the chains with the exact desired length lowercase__: Tuple = 0 # the cached sizes of the previous chains lowercase__: dict[int, int] = {} for start_chain_element in range(1 , __UpperCAmelCase ): # The temporary set will contain the elements of the chain lowercase__: Union[str, Any] = set() lowercase__: str = 0 # Stop computing the chain when you find a cached size, a repeating item or the # length is greater then the desired one. lowercase__: Any = start_chain_element while ( chain_element not in chain_sets_lengths and chain_element not in chain_set and chain_set_length <= chain_length ): chain_set.add(__UpperCAmelCase ) chain_set_length += 1 lowercase__: Any = digit_factorial_sum(__UpperCAmelCase ) if chain_element in chain_sets_lengths: chain_set_length += chain_sets_lengths[chain_element] lowercase__: Optional[int] = chain_set_length # If chain contains the exact amount of elements increase the counter if chain_set_length == chain_length: chains_counter += 1 return chains_counter if __name__ == "__main__": import doctest doctest.testmod() print(f'''{solution()}''')
177
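A concrete trace of the chain logic above, using the classic Project Euler 74 example: starting from 69, the factorial digit sums visit five distinct values before the 1454 -> 169 -> 363601 cycle repeats:

from math import factorial

def digit_factorial_sum(n: int) -> int:
    return sum(factorial(int(d)) for d in str(n))

seen, n = [], 69
while n not in seen:
    seen.append(n)
    n = digit_factorial_sum(n)
print(seen)       # [69, 363600, 1454, 169, 363601], five distinct terms
print(len(seen))  # 5, so 69 contributes to solution(chain_length=5)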
"""simple docstring""" import argparse import torch from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel from transformers.utils import logging logging.set_verbosity_info() def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: # Initialise PyTorch model lowercase__: Optional[Any] = FunnelConfig.from_json_file(__UpperCAmelCase ) print(F"""Building PyTorch model from configuration: {config}""" ) lowercase__: List[Any] = FunnelBaseModel(__UpperCAmelCase ) if base_model else FunnelModel(__UpperCAmelCase ) # Load weights from tf checkpoint load_tf_weights_in_funnel(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # Save pytorch-model print(F"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , __UpperCAmelCase ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not." ) __A = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model )
177
1
import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.17.0.dev0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""") lowercase_ = logging.getLogger(__name__) @dataclass class _snake_case : UpperCamelCase__ : Optional[str] =field( default="""tab_fact""" , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""}) UpperCamelCase__ : Optional[str] =field( default="""tab_fact""" , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} , ) UpperCamelCase__ : int =field( default=1_0_2_4 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) UpperCamelCase__ : bool =field( default=lowercase__ , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""}) UpperCamelCase__ : bool =field( default=lowercase__ , metadata={ """help""": ( """Whether to pad all samples to `max_seq_length`. """ """If False, will pad the samples dynamically when batching to the maximum length in the batch.""" ) } , ) UpperCamelCase__ : Optional[int] =field( default=lowercase__ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) UpperCamelCase__ : Optional[int] =field( default=lowercase__ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) UpperCamelCase__ : Optional[int] =field( default=lowercase__ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of prediction examples to this """ """value if set.""" ) } , ) UpperCamelCase__ : Optional[str] =field( default=lowercase__ , metadata={"""help""": """A csv or a json file containing the training data."""}) UpperCamelCase__ : Optional[str] =field( default=lowercase__ , metadata={"""help""": """A csv or a json file containing the validation data."""}) UpperCamelCase__ : Optional[str] =field(default=lowercase__ , metadata={"""help""": """A csv or a json file containing the test data."""}) def A__ ( self : str ): if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError("Need either a GLUE task, a training/validation file or a dataset name." ) else: lowercase__ = self.train_file.split("." )[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." lowercase__ = self.validation_file.split("." )[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." 
@dataclass class _snake_case : UpperCamelCase__ : str =field( default=lowercase__ , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""}) UpperCamelCase__ : Optional[str] =field( default=lowercase__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""}) UpperCamelCase__ : Optional[str] =field( default=lowercase__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""}) UpperCamelCase__ : Optional[str] =field( default=lowercase__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) UpperCamelCase__ : bool =field( default=lowercase__ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , ) UpperCamelCase__ : str =field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) UpperCamelCase__ : bool =field( default=lowercase__ , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) def __lowerCAmelCase ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowercase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowercase__ , lowercase__ , lowercase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowercase__ , lowercase__ , lowercase__ = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) lowercase__ = training_args.get_process_log_level() logger.setLevel(SCREAMING_SNAKE_CASE_ ) datasets.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE_ ) transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE_ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(f'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. lowercase__ = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: lowercase__ = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. 
set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. lowercase__ = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. lowercase__ = {"train": data_args.train_file, "validation": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: lowercase__ = data_args.train_file.split("." )[-1] lowercase__ = data_args.test_file.split("." )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." lowercase__ = data_args.test_file else: raise ValueError("Need either a GLUE task or a test file for `do_predict`." ) for key in data_files.keys(): logger.info(f'''load a local file for {key}: {data_files[key]}''' ) if data_args.train_file.endswith(".csv" ): # Loading a dataset from local csv files lowercase__ = load_dataset("csv" , data_files=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files lowercase__ = load_dataset("json" , data_files=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels lowercase__ = raw_datasets["train"].features["label"].names lowercase__ = len(SCREAMING_SNAKE_CASE_ ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
lowercase__ = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # load tapex tokenizer lowercase__ = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=SCREAMING_SNAKE_CASE_ , ) lowercase__ = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Padding strategy if data_args.pad_to_max_length: lowercase__ = "max_length" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch lowercase__ = False # Some models have set the order of the labels to use, so let's make sure we do use it. lowercase__ = {"Refused": 0, "Entailed": 1} lowercase__ = {0: "Refused", 1: "Entailed"} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the''' f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' ) lowercase__ = min(data_args.max_seq_length , tokenizer.model_max_length ) def preprocess_tabfact_function(SCREAMING_SNAKE_CASE_ ): # Tokenize the texts def _convert_table_text_to_pandas(SCREAMING_SNAKE_CASE_ ): lowercase__ = [_table_row.split("#" ) for _table_row in _table_text.strip("\n" ).split("\n" )] lowercase__ = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] ) return _table_pd lowercase__ = examples["statement"] lowercase__ = list(map(_convert_table_text_to_pandas , examples["table_text"] ) ) lowercase__ = tokenizer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ ) lowercase__ = examples["label"] return result with training_args.main_process_first(desc="dataset map pre-processing" ): lowercase__ = raw_datasets.map( SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on dataset" , ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset" ) lowercase__ = raw_datasets["train"] if data_args.max_train_samples is not None: lowercase__ = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset" ) lowercase__ = raw_datasets["validation"] if data_args.max_eval_samples is not None: lowercase__ = eval_dataset.select(range(data_args.max_eval_samples ) ) if training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise ValueError("--do_predict requires a test dataset" ) lowercase__ = raw_datasets["test"] if data_args.max_predict_samples is not None: lowercase__ = 
predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(SCREAMING_SNAKE_CASE_ ) ) , 3 ): logger.info(f'''Sample {index} of the training set: {train_dataset[index]}.''' ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(SCREAMING_SNAKE_CASE_ ): lowercase__ = p.predictions[0] if isinstance(p.predictions , SCREAMING_SNAKE_CASE_ ) else p.predictions lowercase__ = np.argmax(SCREAMING_SNAKE_CASE_ , axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: lowercase__ = default_data_collator elif training_args.fpaa: lowercase__ = DataCollatorWithPadding(SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=8 ) else: lowercase__ = None # Initialize our Trainer lowercase__ = Trainer( model=SCREAMING_SNAKE_CASE_ , args=SCREAMING_SNAKE_CASE_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , data_collator=SCREAMING_SNAKE_CASE_ , ) # Training if training_args.do_train: lowercase__ = None if training_args.resume_from_checkpoint is not None: lowercase__ = training_args.resume_from_checkpoint elif last_checkpoint is not None: lowercase__ = last_checkpoint lowercase__ = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE_ ) lowercase__ = train_result.metrics lowercase__ = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(SCREAMING_SNAKE_CASE_ ) ) lowercase__ = min(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("train" , SCREAMING_SNAKE_CASE_ ) trainer.save_metrics("train" , SCREAMING_SNAKE_CASE_ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) lowercase__ = trainer.evaluate(eval_dataset=SCREAMING_SNAKE_CASE_ ) lowercase__ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(SCREAMING_SNAKE_CASE_ ) lowercase__ = min(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) ) trainer.log_metrics("eval" , SCREAMING_SNAKE_CASE_ ) trainer.save_metrics("eval" , SCREAMING_SNAKE_CASE_ ) if training_args.do_predict: logger.info("*** Predict ***" ) # Removing the `label` columns because it contains -1 and Trainer won't like that. 
lowercase__ = predict_dataset.remove_columns("label" ) lowercase__ = trainer.predict(SCREAMING_SNAKE_CASE_ , metric_key_prefix="predict" ).predictions lowercase__ = np.argmax(SCREAMING_SNAKE_CASE_ , axis=1 ) lowercase__ = os.path.join(training_args.output_dir , "predict_results_tabfact.txt" ) if trainer.is_world_process_zero(): with open(SCREAMING_SNAKE_CASE_ , "w" ) as writer: logger.info("***** Predict Results *****" ) writer.write("index\tprediction\n" ) for index, item in enumerate(SCREAMING_SNAKE_CASE_ ): lowercase__ = label_list[item] writer.write(f'''{index}\t{item}\n''' ) lowercase__ = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"} if training_args.push_to_hub: trainer.push_to_hub(**SCREAMING_SNAKE_CASE_ ) else: trainer.create_model_card(**SCREAMING_SNAKE_CASE_ ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
362
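The preprocessing above turns each TabFact table from a '#'-delimited blob (rows split on newlines, cells on '#') into a DataFrame before tokenization; a standalone sketch on a made-up table string:

import pandas as pd

table_text = "year#city\n2008#Beijing\n2012#London\n"  # made-up example
rows = [row.split("#") for row in table_text.strip("\n").split("\n")]
table = pd.DataFrame.from_records(rows[1:], columns=rows[0])
print(table)
# approximate output:
#    year     city
# 0  2008  Beijing
# 1  2012   London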
from math import sqrt import numpy as np from sympy import symbols # Coefficient # Speed of light (m/s) lowercase_ = 2_9979_2458 # Symbols lowercase_ , lowercase_ , lowercase_ , lowercase_ = symbols("""ct x y z""") def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): if velocity > c: raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!" ) elif velocity < 1: # Usually the speed should be much higher than 1 (c order of magnitude) raise ValueError("Speed must be greater than or equal to 1!" ) return velocity / c def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): return 1 / sqrt(1 - beta(SCREAMING_SNAKE_CASE_ ) ** 2 ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): return np.array( [ [gamma(SCREAMING_SNAKE_CASE_ ), -gamma(SCREAMING_SNAKE_CASE_ ) * beta(SCREAMING_SNAKE_CASE_ ), 0, 0], [-gamma(SCREAMING_SNAKE_CASE_ ) * beta(SCREAMING_SNAKE_CASE_ ), gamma(SCREAMING_SNAKE_CASE_ ), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], ] ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ): # Ensure event is not empty if event is None: lowercase__ = np.array([ct, x, y, z] ) # Symbolic four vector else: event[0] *= c # x0 is ct (speed of light * time) return transformation_matrix(SCREAMING_SNAKE_CASE_ ) @ event if __name__ == "__main__": import doctest doctest.testmod() # Example of symbolic vector: lowercase_ = transform(2997_9245) print("""Example of four vector: """) print(F'ct\' = {four_vector[0]}') print(F'x\' = {four_vector[1]}') print(F'y\' = {four_vector[2]}') print(F'z\' = {four_vector[3]}') # Substitute symbols with numerical values lowercase_ = {ct: c, x: 1, y: 1, z: 1} lowercase_ = [four_vector[i].subs(sub_dict) for i in range(4)] print(F'\n{numerical_vector}')
224
0
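A numeric spot check of the boost above: at v = 0.5c we get beta = 0.5 and gamma = 1/sqrt(1 - 0.25), about 1.1547, so the purely temporal event (ct, x) = (1, 0) picks up a negative x' component:

from math import sqrt

import numpy as np

c = 299792458
v = 0.5 * c
beta = v / c                   # 0.5
gamma = 1 / sqrt(1 - beta**2)  # about 1.1547

# 2x2 (ct, x) block of the transformation matrix built in the code above.
boost = np.array([[gamma, -gamma * beta], [-gamma * beta, gamma]])
event = np.array([1.0, 0.0])   # (ct, x) in the unprimed frame
print(boost @ event)           # about [ 1.1547 -0.5774]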