code
stringlengths
86
54.5k
code_codestyle
int64
0
371
style_context
stringlengths
87
49.2k
style_context_codestyle
int64
0
349
label
int64
0
1
import requests from bsa import BeautifulSoup def lowerCAmelCase_ ( snake_case_ = "AAPL" ): _A : str = f'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}''' _A : List[Any] = BeautifulSoup(requests.get(snake_case_ ).text,"""html.parser""" ) _A : Union[str, Any] = """My(6px) Pos(r) smartphone_Mt(6px)""" return soup.find("""div""",class_=class_ ).find("""span""" ).text if __name__ == "__main__": for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split(): print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
26
'''simple docstring''' import unittest from accelerate import debug_launcher from accelerate.test_utils import require_cpu, test_ops, test_script @require_cpu class lowerCamelCase_ ( unittest.TestCase ): def lowercase_ ( self : Tuple ): '''simple docstring''' debug_launcher(test_script.main ) def lowercase_ ( self : List[str] ): '''simple docstring''' debug_launcher(test_ops.main )
181
0
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ....tokenization_utils_fast import PreTrainedTokenizerFast from ....utils import logging from .tokenization_retribert import RetriBertTokenizer __snake_case = logging.get_logger(__name__) __snake_case = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} __snake_case = { '''vocab_file''': { '''yjernite/retribert-base-uncased''': ( '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''yjernite/retribert-base-uncased''': ( '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json''' ), }, } __snake_case = { '''yjernite/retribert-base-uncased''': 5_12, } __snake_case = { '''yjernite/retribert-base-uncased''': {'''do_lower_case''': True}, } class __snake_case ( lowerCamelCase__ ): __lowerCamelCase : List[str] = VOCAB_FILES_NAMES __lowerCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase : Any = PRETRAINED_INIT_CONFIGURATION __lowerCamelCase : Union[str, Any] = RetriBertTokenizer __lowerCamelCase : List[Any] = ["""input_ids""", """attention_mask"""] def __init__( self , snake_case__=None , snake_case__=None , snake_case__=True , snake_case__="[UNK]" , snake_case__="[SEP]" , snake_case__="[PAD]" , snake_case__="[CLS]" , snake_case__="[MASK]" , snake_case__=True , snake_case__=None , **snake_case__ , ) -> Optional[int]: '''simple docstring''' super().__init__( snake_case__ , tokenizer_file=snake_case__ , do_lower_case=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , tokenize_chinese_chars=snake_case__ , strip_accents=snake_case__ , **snake_case__ , ) UpperCAmelCase : Optional[int] =json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , snake_case__ ) != do_lower_case or normalizer_state.get('''strip_accents''' , snake_case__ ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , snake_case__ ) != tokenize_chinese_chars ): UpperCAmelCase : Optional[Any] =getattr(snake_case__ , normalizer_state.pop('''type''' ) ) UpperCAmelCase : Any =do_lower_case UpperCAmelCase : List[Any] =strip_accents UpperCAmelCase : int =tokenize_chinese_chars UpperCAmelCase : Tuple =normalizer_class(**snake_case__ ) UpperCAmelCase : Union[str, Any] =do_lower_case def UpperCAmelCase__ ( self , snake_case__ , snake_case__=None ) -> int: '''simple docstring''' UpperCAmelCase : Any =[self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = None ) -> List[int]: '''simple docstring''' UpperCAmelCase : str =[self.sep_token_id] UpperCAmelCase : List[Any] =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = None ) -> Tuple[str]: '''simple docstring''' UpperCAmelCase : Optional[Any] =self._tokenizer.model.save(snake_case__ , name=snake_case__ ) return tuple(snake_case__ )
78
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .notes_encoder import SpectrogramNotesEncoder from .continous_encoder import SpectrogramContEncoder from .pipeline_spectrogram_diffusion import ( SpectrogramContEncoder, SpectrogramDiffusionPipeline, TaFilmDecoder, ) try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .midi_utils import MidiProcessor
78
1
import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class lowerCAmelCase : '''simple docstring''' def __init__( self : str , __a : List[str] , __a : int = 13 , __a : int = 64 , __a : int = 2 , __a : int = 3 , __a : int = 3 , __a : bool = True , __a : bool = True , __a : int = 128 , __a : Optional[Any]=[16, 32, 64, 128] , __a : int = 7 , __a : int = 4 , __a : int = 37 , __a : str = "gelu" , __a : float = 0.1 , __a : float = 0.1 , __a : int = 10 , __a : float = 0.02 , __a : int = 2 , __a : int = 1 , __a : int = 128 , __a : List[int] = [2, 2, 2, 2] , __a : int = 2 , __a : int = 2 , ) -> Optional[Any]: """simple docstring""" __lowercase : Optional[int] = parent __lowercase : Tuple = batch_size __lowercase : List[str] = image_size __lowercase : List[Any] = patch_size __lowercase : str = num_channels __lowercase : Dict = is_training __lowercase : Optional[int] = use_labels __lowercase : Union[str, Any] = hidden_size __lowercase : Optional[Any] = num_hidden_layers __lowercase : int = num_attention_heads __lowercase : int = intermediate_size __lowercase : List[Any] = hidden_act __lowercase : Optional[int] = hidden_dropout_prob __lowercase : Union[str, Any] = attention_probs_dropout_prob __lowercase : Union[str, Any] = type_sequence_label_size __lowercase : Optional[Any] = initializer_range __lowercase : List[Any] = encoder_stride __lowercase : str = num_attention_outputs __lowercase : Tuple = embed_dim __lowercase : Tuple = embed_dim + 1 __lowercase : List[str] = resolution __lowercase : Optional[Any] = depths __lowercase : Dict = hidden_sizes __lowercase : Union[str, Any] = dim __lowercase : str = mlp_expansion_ratio def lowerCAmelCase ( self : int ) -> int: """simple docstring""" __lowercase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowercase : Optional[int] = None if self.use_labels: __lowercase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase : List[Any] = self.get_config() return config, pixel_values, labels def lowerCAmelCase ( self : List[str] ) -> Tuple: """simple docstring""" return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def lowerCAmelCase ( self : Union[str, Any] , __a : Optional[Any] , __a : int , __a : int ) -> int: """simple docstring""" __lowercase : List[Any] = TFEfficientFormerModel(config=__a ) __lowercase : Optional[int] = model(__a , training=__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase ( self : Optional[Any] , __a : Tuple , __a : Tuple , __a : Tuple ) -> Optional[int]: """simple docstring""" __lowercase : List[Any] = self.type_sequence_label_size __lowercase : Optional[Any] = TFEfficientFormerForImageClassification(__a ) __lowercase : Union[str, Any] = model(__a , labels=__a , training=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __lowercase : Optional[Any] = 1 __lowercase : int = TFEfficientFormerForImageClassification(__a ) __lowercase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowercase : int = model(__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCAmelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" __lowercase : Tuple = self.prepare_config_and_inputs() __lowercase , __lowercase , __lowercase : Union[str, Any] = config_and_inputs __lowercase : Any = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class lowerCAmelCase ( __a , __a , unittest.TestCase ): '''simple docstring''' _A : Union[str, Any] = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) _A : List[str] = ( { '''feature-extraction''': TFEfficientFormerModel, '''image-classification''': ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) _A : Any = False _A : Optional[int] = False _A : Dict = False _A : List[Any] = False _A : int = False def lowerCAmelCase ( self : List[str] ) -> List[str]: """simple docstring""" __lowercase : Optional[int] = TFEfficientFormerModelTester(self ) __lowercase : str = ConfigTester( self , config_class=__a , has_text_modality=__a , hidden_size=37 ) def lowerCAmelCase ( self : List[str] ) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" ) def lowerCAmelCase ( self : List[str] ) -> List[str]: """simple docstring""" pass @unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" ) def lowerCAmelCase ( self : List[Any] ) -> List[str]: """simple docstring""" pass def lowerCAmelCase ( self : Optional[Any] ) -> Any: """simple docstring""" __lowercase , __lowercase : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase : Optional[int] = model_class(__a ) __lowercase : List[Any] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase : str = [*signature.parameters.keys()] __lowercase : int = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __a ) def lowerCAmelCase ( self : List[Any] ) -> Optional[int]: """simple docstring""" def check_hidden_states_output(__a : Any , __a : str , __a : Any ): __lowercase : Dict = model_class(__a ) __lowercase : Tuple = model(**self._prepare_for_class(__a , __a ) , training=__a ) __lowercase : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __lowercase : Dict = getattr( self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(__a ) , __a ) if hasattr(self.model_tester , """encoder_seq_length""" ): __lowercase : Union[str, Any] = self.model_tester.encoder_seq_length if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1: __lowercase : str = seq_length * self.model_tester.chunk_length else: __lowercase : Union[str, Any] = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: __lowercase : Union[str, Any] = outputs.decoder_hidden_states self.asseretIsInstance(__a , (list, tuple) ) self.assertEqual(len(__a ) , __a ) __lowercase : List[str] = getattr(self.model_tester , """seq_length""" , __a ) __lowercase : Tuple = getattr(self.model_tester , """decoder_seq_length""" , __a ) self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , ) __lowercase , __lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase : int = True check_hidden_states_output(__a , __a , __a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowercase : Optional[Any] = True check_hidden_states_output(__a , __a , __a ) def lowerCAmelCase ( self : Any , __a : Optional[int] , __a : List[Any] , __a : Optional[int]=False ) -> int: """simple docstring""" __lowercase : Any = super()._prepare_for_class(__a , __a , return_labels=__a ) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def lowerCAmelCase ( self : str ) -> Any: """simple docstring""" __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) @unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" ) def lowerCAmelCase ( self : Tuple ) -> List[str]: """simple docstring""" __lowercase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__a ) def lowerCAmelCase ( self : int ) -> Union[str, Any]: """simple docstring""" __lowercase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) @slow def lowerCAmelCase ( self : Any ) -> int: """simple docstring""" for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase : Any = TFEfficientFormerModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def lowerCAmelCase ( self : Dict ) -> str: """simple docstring""" __lowercase , __lowercase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() __lowercase : int = True __lowercase : Optional[int] = getattr(self.model_tester , """seq_length""" , __a ) __lowercase : List[Any] = getattr(self.model_tester , """encoder_seq_length""" , __a ) __lowercase : str = getattr(self.model_tester , """key_length""" , __a ) __lowercase : Any = getattr(self.model_tester , """chunk_length""" , __a ) if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ): __lowercase : Optional[int] = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: __lowercase : Union[str, Any] = True __lowercase : Tuple = False __lowercase : List[str] = True __lowercase : int = model_class(__a ) __lowercase : str = model(**self._prepare_for_class(__a , __a ) , training=__a ) __lowercase : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(__a ) , self.model_tester.num_attention_outputs ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __lowercase : str = True __lowercase : Any = model_class(__a ) __lowercase : List[str] = model(**self._prepare_for_class(__a , __a ) , training=__a ) __lowercase : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(__a ) , self.model_tester.num_attention_outputs ) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def lowerCAmelCase ( self : Optional[int] ) -> Any: """simple docstring""" __lowercase , __lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model __lowercase : List[str] = model_class(__a ) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes __lowercase : List[str] = { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=__a ) for key, val in model.input_signature.items() if key in model.dummy_inputs } __lowercase : List[str] = model(__a ) self.assertTrue(outputs_dict is not None ) def snake_case_ ( ): __lowercase : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase ( self : int ) -> str: """simple docstring""" return ( EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" ) if is_vision_available() else None ) @slow def lowerCAmelCase ( self : str ) -> int: """simple docstring""" __lowercase : int = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" ) __lowercase : Optional[Any] = self.default_image_processor __lowercase : Optional[int] = prepare_img() __lowercase : List[str] = image_processor(images=__a , return_tensors="""tf""" ) # forward pass __lowercase : int = model(**__a , training=__a ) # verify the logits __lowercase : Union[str, Any] = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , __a ) __lowercase : Optional[Any] = tf.constant([-0.0555, 0.4825, -0.0852] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) ) @slow def lowerCAmelCase ( self : Any ) -> Optional[int]: """simple docstring""" __lowercase : Dict = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( """snap-research/efficientformer-l1-300""" ) __lowercase : Optional[int] = self.default_image_processor __lowercase : List[Any] = prepare_img() __lowercase : Any = image_processor(images=__a , return_tensors="""tf""" ) # forward pass __lowercase : List[str] = model(**__a , training=__a ) # verify the logits __lowercase : Any = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , __a ) __lowercase : List[str] = tf.constant([-0.1312, 0.4353, -1.0499] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
233
import unittest from transformers import CamembertTokenizer, CamembertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import is_torch_available from ...test_tokenization_common import TokenizerTesterMixin lowerCamelCase : Any = get_tests_dir('''fixtures/test_sentencepiece.model''') lowerCamelCase : Any = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''') lowerCamelCase : Optional[int] = '''pt''' if is_torch_available() else '''tf''' @require_sentencepiece @require_tokenizers class lowerCAmelCase ( __a , unittest.TestCase ): '''simple docstring''' _A : Dict = CamembertTokenizer _A : Union[str, Any] = CamembertTokenizerFast _A : Union[str, Any] = True _A : Tuple = True def lowerCAmelCase ( self : Optional[int] ) -> Optional[int]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __lowercase : Union[str, Any] = CamembertTokenizer(__a ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" __lowercase : Dict = """<pad>""" __lowercase : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a ) def lowerCAmelCase ( self : Optional[Any] ) -> Tuple: """simple docstring""" __lowercase : int = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" ) self.assertEqual(vocab_keys[1] , """<pad>""" ) self.assertEqual(vocab_keys[-1] , """<mask>""" ) self.assertEqual(len(__a ) , 1004 ) def lowerCAmelCase ( self : str ) -> List[Any]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1005 ) def lowerCAmelCase ( self : Any ) -> Optional[Any]: """simple docstring""" __lowercase : Tuple = CamembertTokenizer(__a ) tokenizer.save_pretrained(self.tmpdirname ) __lowercase : Tuple = CamembertTokenizerFast.from_pretrained(self.tmpdirname ) __lowercase : List[str] = """I was born in 92000, and this is falsé.""" __lowercase : Optional[Any] = tokenizer.encode(__a ) __lowercase : List[Any] = rust_tokenizer.encode(__a ) self.assertListEqual(__a , __a ) __lowercase : Tuple = tokenizer.encode(__a , add_special_tokens=__a ) __lowercase : Union[str, Any] = rust_tokenizer.encode(__a , add_special_tokens=__a ) self.assertListEqual(__a , __a ) # <unk> tokens are not the same for `rust` than for `slow`. # Because spm gives back raw token instead of `unk` in EncodeAsPieces # tokens = tokenizer.tokenize(sequence) __lowercase : Dict = tokenizer.convert_ids_to_tokens(__a ) __lowercase : Any = rust_tokenizer.tokenize(__a ) self.assertListEqual(__a , __a ) def lowerCAmelCase ( self : Any ) -> Dict: """simple docstring""" if not self.test_rust_tokenizer: return __lowercase : List[str] = self.get_tokenizer() __lowercase : Any = self.get_rust_tokenizer() __lowercase : Any = """I was born in 92000, and this is falsé.""" __lowercase : Tuple = tokenizer.tokenize(__a ) __lowercase : Optional[Any] = rust_tokenizer.tokenize(__a ) self.assertListEqual(__a , __a ) __lowercase : Dict = tokenizer.encode(__a , add_special_tokens=__a ) __lowercase : Dict = rust_tokenizer.encode(__a , add_special_tokens=__a ) self.assertListEqual(__a , __a ) __lowercase : Any = self.get_rust_tokenizer() __lowercase : str = tokenizer.encode(__a ) __lowercase : Union[str, Any] = rust_tokenizer.encode(__a ) self.assertListEqual(__a , __a ) @slow def lowerCAmelCase ( self : Optional[int] ) -> List[Any]: """simple docstring""" __lowercase : str = {"""input_ids""": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # camembert is a french model. So we also use french texts. __lowercase : List[str] = [ """Le transformeur est un modèle d'apprentissage profond introduit en 2017, """ """utilisé principalement dans le domaine du traitement automatique des langues (TAL).""", """À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """ """pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """ """telles que la traduction et la synthèse de texte.""", ] self.tokenizer_integration_test_util( expected_encoding=__a , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=__a , )
233
1
"""simple docstring""" import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class lowerCAmelCase : @staticmethod def A_ ( *UpperCAmelCase : List[Any] , **UpperCAmelCase : Dict ) -> int: pass @is_pipeline_test @require_vision @require_torch class lowerCAmelCase ( unittest.TestCase ): UpperCAmelCase__ = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def A_ ( self : List[str] , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] ) -> str: lowerCamelCase__ : List[Any] = pipeline( 'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' ) lowerCamelCase__ : Any = [ { 'image': './tests/fixtures/tests_samples/COCO/000000039769.png', 'candidate_labels': ['cat', 'remote', 'couch'], } ] return object_detector, examples def A_ ( self : List[str] , UpperCAmelCase : int , UpperCAmelCase : Optional[int] ) -> Any: lowerCamelCase__ : int = object_detector(examples[0] , threshold=0.0 ) lowerCamelCase__ : Tuple = len(UpperCAmelCase ) self.assertGreater(UpperCAmelCase , 0 ) self.assertEqual( UpperCAmelCase , [ { 'score': ANY(UpperCAmelCase ), 'label': ANY(UpperCAmelCase ), 'box': {'xmin': ANY(UpperCAmelCase ), 'ymin': ANY(UpperCAmelCase ), 'xmax': ANY(UpperCAmelCase ), 'ymax': ANY(UpperCAmelCase )}, } for i in range(UpperCAmelCase ) ] , ) @require_tf @unittest.skip('Zero Shot Object Detection not implemented in TF' ) def A_ ( self : Optional[Any] ) -> Union[str, Any]: pass @require_torch def A_ ( self : Union[str, Any] ) -> str: lowerCamelCase__ : Tuple = pipeline( 'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' ) lowerCamelCase__ : int = object_detector( './tests/fixtures/tests_samples/COCO/000000039769.png' , candidate_labels=['cat', 'remote', 'couch'] , threshold=0.6_4 , ) self.assertEqual( nested_simplify(UpperCAmelCase , decimals=4 ) , [ {'score': 0.7_2_3_5, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}}, {'score': 0.7_2_1_8, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}}, {'score': 0.7_1_8_4, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}}, {'score': 0.6_7_4_8, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}}, {'score': 0.6_6_5_6, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}}, {'score': 0.6_6_1_4, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}}, {'score': 0.6_4_5_6, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}}, {'score': 0.6_4_2, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}}, {'score': 0.6_4_1_9, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}}, ] , ) lowerCamelCase__ : Tuple = object_detector( [ { 'image': './tests/fixtures/tests_samples/COCO/000000039769.png', 'candidate_labels': ['cat', 'remote', 'couch'], } ] , threshold=0.6_4 , ) self.assertEqual( nested_simplify(UpperCAmelCase , decimals=4 ) , [ [ {'score': 0.7_2_3_5, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}}, {'score': 0.7_2_1_8, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}}, {'score': 0.7_1_8_4, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}}, {'score': 0.6_7_4_8, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}}, {'score': 0.6_6_5_6, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}}, {'score': 0.6_6_1_4, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}}, {'score': 0.6_4_5_6, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}}, {'score': 0.6_4_2, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}}, {'score': 0.6_4_1_9, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}}, ] ] , ) @require_torch @slow def A_ ( self : str ) -> Tuple: lowerCamelCase__ : Optional[Any] = pipeline('zero-shot-object-detection' ) lowerCamelCase__ : Any = object_detector( 'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , ) self.assertEqual( nested_simplify(UpperCAmelCase , decimals=4 ) , [ {'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}}, {'score': 0.2_5_3_7, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, {'score': 0.1_4_7_4, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}}, {'score': 0.1_2_0_8, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}, ] , ) lowerCamelCase__ : Tuple = object_detector( [ { 'image': 'http://images.cocodataset.org/val2017/000000039769.jpg', 'candidate_labels': ['cat', 'remote', 'couch'], }, { 'image': 'http://images.cocodataset.org/val2017/000000039769.jpg', 'candidate_labels': ['cat', 'remote', 'couch'], }, ] , ) self.assertEqual( nested_simplify(UpperCAmelCase , decimals=4 ) , [ [ {'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}}, {'score': 0.2_5_3_7, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, {'score': 0.1_4_7_4, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}}, {'score': 0.1_2_0_8, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}, ], [ {'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}}, {'score': 0.2_5_3_7, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, {'score': 0.1_4_7_4, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}}, {'score': 0.1_2_0_8, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}, ], ] , ) @require_tf @unittest.skip('Zero Shot Object Detection not implemented in TF' ) def A_ ( self : Tuple ) -> Dict: pass @require_torch @slow def A_ ( self : Tuple ) -> List[Any]: lowerCamelCase__ : List[str] = 0.2 lowerCamelCase__ : Optional[Any] = pipeline('zero-shot-object-detection' ) lowerCamelCase__ : Optional[int] = object_detector( 'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , threshold=UpperCAmelCase , ) self.assertEqual( nested_simplify(UpperCAmelCase , decimals=4 ) , [ {'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}}, {'score': 0.2_5_3_7, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, ] , ) @require_torch @slow def A_ ( self : Optional[Any] ) -> Optional[Any]: lowerCamelCase__ : Dict = 2 lowerCamelCase__ : Union[str, Any] = pipeline('zero-shot-object-detection' ) lowerCamelCase__ : int = object_detector( 'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , top_k=UpperCAmelCase , ) self.assertEqual( nested_simplify(UpperCAmelCase , decimals=4 ) , [ {'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}}, ] , )
366
from typing import List, Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from ... import AutoBackbone from ...modeling_outputs import SemanticSegmenterOutput from ...modeling_utils import PreTrainedModel from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings from ...utils.backbone_utils import BackboneMixin from .configuration_upernet import UperNetConfig _UpperCAmelCase : Tuple = [ """openmmlab/upernet-convnext-tiny""", # See all UperNet models at https://huggingface.co/models?filter=upernet ] # General docstring _UpperCAmelCase : List[str] = """UperNetConfig""" class lowerCAmelCase ( nn.Module ): def __init__( self : int , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : Union[int, Tuple[int, int]] , UpperCAmelCase : Union[int, Tuple[int, int], str] = 0 , UpperCAmelCase : bool = False , UpperCAmelCase : Union[int, Tuple[int, int]] = 1 , ) -> None: super().__init__() lowerCamelCase__ : Any = nn.Convad( in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , kernel_size=UpperCAmelCase , padding=UpperCAmelCase , bias=UpperCAmelCase , dilation=UpperCAmelCase , ) lowerCamelCase__ : str = nn.BatchNormad(UpperCAmelCase ) lowerCamelCase__ : Tuple = nn.ReLU() def A_ ( self : Tuple , UpperCAmelCase : torch.Tensor ) -> torch.Tensor: lowerCamelCase__ : Tuple = self.conv(UpperCAmelCase ) lowerCamelCase__ : int = self.batch_norm(UpperCAmelCase ) lowerCamelCase__ : List[Any] = self.activation(UpperCAmelCase ) return output class lowerCAmelCase ( nn.Module ): def __init__( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int ) -> None: super().__init__() lowerCamelCase__ : int = [ nn.AdaptiveAvgPoolad(UpperCAmelCase ), UperNetConvModule(UpperCAmelCase , UpperCAmelCase , kernel_size=1 ), ] for i, layer in enumerate(self.layers ): self.add_module(str(UpperCAmelCase ) , UpperCAmelCase ) def A_ ( self : Union[str, Any] , UpperCAmelCase : torch.Tensor ) -> torch.Tensor: lowerCamelCase__ : Dict = input for layer in self.layers: lowerCamelCase__ : Tuple = layer(UpperCAmelCase ) return hidden_state class lowerCAmelCase ( nn.Module ): def __init__( self : Tuple , UpperCAmelCase : Tuple[int, ...] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : bool ) -> None: super().__init__() lowerCamelCase__ : int = pool_scales lowerCamelCase__ : Tuple = align_corners lowerCamelCase__ : Union[str, Any] = in_channels lowerCamelCase__ : List[Any] = channels lowerCamelCase__ : Tuple = [] for i, pool_scale in enumerate(UpperCAmelCase ): lowerCamelCase__ : Dict = UperNetPyramidPoolingBlock(pool_scale=UpperCAmelCase , in_channels=UpperCAmelCase , channels=UpperCAmelCase ) self.blocks.append(UpperCAmelCase ) self.add_module(str(UpperCAmelCase ) , UpperCAmelCase ) def A_ ( self : Optional[int] , UpperCAmelCase : torch.Tensor ) -> List[torch.Tensor]: lowerCamelCase__ : Tuple = [] for ppm in self.blocks: lowerCamelCase__ : Union[str, Any] = ppm(UpperCAmelCase ) lowerCamelCase__ : Optional[Any] = nn.functional.interpolate( UpperCAmelCase , size=x.size()[2:] , mode='bilinear' , align_corners=self.align_corners ) ppm_outs.append(UpperCAmelCase ) return ppm_outs class lowerCAmelCase ( nn.Module ): def __init__( self : str , UpperCAmelCase : int , UpperCAmelCase : Any ) -> int: super().__init__() lowerCamelCase__ : Tuple = config lowerCamelCase__ : Optional[Any] = config.pool_scales # e.g. (1, 2, 3, 6) lowerCamelCase__ : List[Any] = in_channels lowerCamelCase__ : Optional[int] = config.hidden_size lowerCamelCase__ : Dict = False lowerCamelCase__ : List[Any] = nn.Convad(self.channels , config.num_labels , kernel_size=1 ) # PSP Module lowerCamelCase__ : Tuple = UperNetPyramidPoolingModule( self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , ) lowerCamelCase__ : int = UperNetConvModule( self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , ) # FPN Module lowerCamelCase__ : str = nn.ModuleList() lowerCamelCase__ : str = nn.ModuleList() for in_channels in self.in_channels[:-1]: # skip the top layer lowerCamelCase__ : str = UperNetConvModule(UpperCAmelCase , self.channels , kernel_size=1 ) lowerCamelCase__ : int = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 ) self.lateral_convs.append(UpperCAmelCase ) self.fpn_convs.append(UpperCAmelCase ) lowerCamelCase__ : List[Any] = UperNetConvModule( len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , ) def A_ ( self : Tuple ) -> List[Any]: self.apply(self._init_weights ) def A_ ( self : Tuple , UpperCAmelCase : Dict ) -> str: if isinstance(UpperCAmelCase , nn.Convad ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def A_ ( self : Optional[int] , UpperCAmelCase : Optional[int] ) -> Optional[int]: lowerCamelCase__ : str = inputs[-1] lowerCamelCase__ : List[str] = [x] psp_outs.extend(self.psp_modules(UpperCAmelCase ) ) lowerCamelCase__ : Tuple = torch.cat(UpperCAmelCase , dim=1 ) lowerCamelCase__ : Optional[Any] = self.bottleneck(UpperCAmelCase ) return output def A_ ( self : str , UpperCAmelCase : torch.Tensor ) -> torch.Tensor: # build laterals lowerCamelCase__ : Union[str, Any] = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )] laterals.append(self.psp_forward(UpperCAmelCase ) ) # build top-down path lowerCamelCase__ : Tuple = len(UpperCAmelCase ) for i in range(used_backbone_levels - 1 , 0 , -1 ): lowerCamelCase__ : Optional[Any] = laterals[i - 1].shape[2:] lowerCamelCase__ : Any = laterals[i - 1] + nn.functional.interpolate( laterals[i] , size=UpperCAmelCase , mode='bilinear' , align_corners=self.align_corners ) # build outputs lowerCamelCase__ : Any = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )] # append psp feature fpn_outs.append(laterals[-1] ) for i in range(used_backbone_levels - 1 , 0 , -1 ): lowerCamelCase__ : Optional[Any] = nn.functional.interpolate( fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='bilinear' , align_corners=self.align_corners ) lowerCamelCase__ : Dict = torch.cat(UpperCAmelCase , dim=1 ) lowerCamelCase__ : List[str] = self.fpn_bottleneck(UpperCAmelCase ) lowerCamelCase__ : int = self.classifier(UpperCAmelCase ) return output class lowerCAmelCase ( nn.Module ): def __init__( self : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int = 2 , UpperCAmelCase : int = 3 , UpperCAmelCase : Union[int, Tuple[int, int]] = 1 ) -> None: super().__init__() lowerCamelCase__ : Any = config lowerCamelCase__ : Optional[Any] = config.auxiliary_in_channels lowerCamelCase__ : str = config.auxiliary_channels lowerCamelCase__ : Optional[Any] = config.auxiliary_num_convs lowerCamelCase__ : str = config.auxiliary_concat_input lowerCamelCase__ : List[Any] = in_index lowerCamelCase__ : List[str] = (kernel_size // 2) * dilation lowerCamelCase__ : Optional[int] = [] convs.append( UperNetConvModule( self.in_channels , self.channels , kernel_size=UpperCAmelCase , padding=UpperCAmelCase , dilation=UpperCAmelCase ) ) for i in range(self.num_convs - 1 ): convs.append( UperNetConvModule( self.channels , self.channels , kernel_size=UpperCAmelCase , padding=UpperCAmelCase , dilation=UpperCAmelCase ) ) if self.num_convs == 0: lowerCamelCase__ : Optional[Any] = nn.Identity() else: lowerCamelCase__ : Optional[Any] = nn.Sequential(*UpperCAmelCase ) if self.concat_input: lowerCamelCase__ : Any = UperNetConvModule( self.in_channels + self.channels , self.channels , kernel_size=UpperCAmelCase , padding=kernel_size // 2 ) lowerCamelCase__ : Dict = nn.Convad(self.channels , config.num_labels , kernel_size=1 ) def A_ ( self : Tuple ) -> Tuple: self.apply(self._init_weights ) def A_ ( self : Union[str, Any] , UpperCAmelCase : List[Any] ) -> List[str]: if isinstance(UpperCAmelCase , nn.Convad ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def A_ ( self : Tuple , UpperCAmelCase : torch.Tensor ) -> torch.Tensor: # just take the relevant feature maps lowerCamelCase__ : str = encoder_hidden_states[self.in_index] lowerCamelCase__ : Union[str, Any] = self.convs(UpperCAmelCase ) if self.concat_input: lowerCamelCase__ : Optional[Any] = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) ) lowerCamelCase__ : Optional[int] = self.classifier(UpperCAmelCase ) return output class lowerCAmelCase ( __UpperCamelCase ): UpperCAmelCase__ = UperNetConfig UpperCAmelCase__ = """pixel_values""" UpperCAmelCase__ = True def A_ ( self : Optional[Any] , UpperCAmelCase : Union[str, Any] ) -> Optional[int]: if isinstance(UpperCAmelCase , UpperCAmelCase ): module.backbone.init_weights() module.decode_head.init_weights() module.auxiliary_head.init_weights() def A_ ( self : str ) -> Tuple: self.backbone.init_weights() self.decode_head.init_weights() self.auxiliary_head.init_weights() def A_ ( self : Any , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int]=False ) -> str: if isinstance(UpperCAmelCase , UpperCAmelCase ): lowerCamelCase__ : Any = value _UpperCAmelCase : List[Any] = R""" Parameters: This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. config ([`UperNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ _UpperCAmelCase : Union[str, Any] = R""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""", __UpperCamelCase, ) class lowerCAmelCase ( __UpperCamelCase ): def __init__( self : Optional[int] , UpperCAmelCase : Union[str, Any] ) -> Dict: super().__init__(UpperCAmelCase ) lowerCamelCase__ : List[str] = AutoBackbone.from_config(config.backbone_config ) # Semantic segmentation head(s) lowerCamelCase__ : List[Any] = UperNetHead(UpperCAmelCase , in_channels=self.backbone.channels ) lowerCamelCase__ : int = UperNetFCNHead(UpperCAmelCase ) if config.use_auxiliary_head else None # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('batch_size, sequence_length' ) ) @replace_return_docstrings(output_type=UpperCAmelCase , config_class=_CONFIG_FOR_DOC ) def A_ ( self : Union[str, Any] , UpperCAmelCase : Optional[torch.Tensor] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[torch.Tensor] = None , UpperCAmelCase : Optional[bool] = None , ) -> Union[tuple, SemanticSegmenterOutput]: lowerCamelCase__ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict lowerCamelCase__ : int = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowerCamelCase__ : str = output_attentions if output_attentions is not None else self.config.output_attentions lowerCamelCase__ : Optional[Any] = self.backbone.forward_with_filtered_kwargs( UpperCAmelCase , output_hidden_states=UpperCAmelCase , output_attentions=UpperCAmelCase ) lowerCamelCase__ : List[str] = outputs.feature_maps lowerCamelCase__ : Optional[int] = self.decode_head(UpperCAmelCase ) lowerCamelCase__ : Optional[Any] = nn.functional.interpolate(UpperCAmelCase , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=UpperCAmelCase ) lowerCamelCase__ : List[str] = None if self.auxiliary_head is not None: lowerCamelCase__ : List[Any] = self.auxiliary_head(UpperCAmelCase ) lowerCamelCase__ : Union[str, Any] = nn.functional.interpolate( UpperCAmelCase , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=UpperCAmelCase ) lowerCamelCase__ : List[str] = None if labels is not None: if self.config.num_labels == 1: raise ValueError('The number of labels should be greater than one' ) else: # compute weighted loss lowerCamelCase__ : Optional[Any] = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index ) lowerCamelCase__ : str = loss_fct(UpperCAmelCase , UpperCAmelCase ) lowerCamelCase__ : Tuple = loss_fct(UpperCAmelCase , UpperCAmelCase ) lowerCamelCase__ : Tuple = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss if not return_dict: if output_hidden_states: lowerCamelCase__ : List[str] = (logits,) + outputs[1:] else: lowerCamelCase__ : int = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SemanticSegmenterOutput( loss=UpperCAmelCase , logits=UpperCAmelCase , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
45
0
'''simple docstring''' import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class a__( lowerCAmelCase__ ): '''simple docstring''' UpperCAmelCase_ : Tuple = ['''image_processor''', '''tokenizer'''] UpperCAmelCase_ : Optional[int] = '''ViTImageProcessor''' UpperCAmelCase_ : Tuple = ('''CLIPTokenizer''', '''CLIPTokenizerFast''') def __init__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase): """simple docstring""" lowerCAmelCase = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , __lowerCAmelCase , ) lowerCAmelCase = kwargs.pop("""feature_extractor""") lowerCAmelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""") if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""") super().__init__(__lowerCAmelCase , __lowerCAmelCase) def __call__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase): """simple docstring""" if text is None and visual_prompt is None and images is None: raise ValueError("""You have to specify either text, visual prompt or images.""") if text is not None and visual_prompt is not None: raise ValueError("""You have to specify exactly one type of prompt. Either text or visual prompt.""") if text is not None: lowerCAmelCase = self.tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase) if visual_prompt is not None: lowerCAmelCase = self.image_processor(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase) if images is not None: lowerCAmelCase = self.image_processor(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase) if visual_prompt is not None and images is not None: lowerCAmelCase = { """pixel_values""": image_features.pixel_values, """conditional_pixel_values""": prompt_features.pixel_values, } return encoding elif text is not None and images is not None: lowerCAmelCase = image_features.pixel_values return encoding elif text is not None: return encoding elif visual_prompt is not None: lowerCAmelCase = { """conditional_pixel_values""": prompt_features.pixel_values, } return encoding else: return BatchEncoding(data=dict(**__lowerCAmelCase) , tensor_type=__lowerCAmelCase) def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase) def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase) @property def a_ ( self): """simple docstring""" warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __lowerCAmelCase , ) return self.image_processor_class @property def a_ ( self): """simple docstring""" warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __lowerCAmelCase , ) return self.image_processor
272
"""simple docstring""" from importlib import import_module from .logging import get_logger UpperCAmelCase : Any = get_logger(__name__) class lowerCamelCase__ : """simple docstring""" def __init__( self : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : int=None ): '''simple docstring''' __UpperCAmelCase : Tuple = attrs or [] if module is not None: for key in module.__dict__: if key in attrs or not key.startswith("""__""" ): setattr(self , UpperCamelCase , getattr(UpperCamelCase , UpperCamelCase ) ) __UpperCAmelCase : Any = module._original_module if isinstance(UpperCamelCase , _PatchedModuleObj ) else module class lowerCamelCase__ : """simple docstring""" __a = [] def __init__( self : str , UpperCamelCase : Tuple , UpperCamelCase : str , UpperCamelCase : List[str] , UpperCamelCase : Optional[Any]=None ): '''simple docstring''' __UpperCAmelCase : int = obj __UpperCAmelCase : Union[str, Any] = target __UpperCAmelCase : List[str] = new __UpperCAmelCase : Optional[int] = target.split(""".""" )[0] __UpperCAmelCase : Tuple = {} __UpperCAmelCase : Union[str, Any] = attrs or [] def __enter__( self : Dict ): '''simple docstring''' *__UpperCAmelCase ,__UpperCAmelCase : str = self.target.split(""".""" ) # Patch modules: # it's used to patch attributes of submodules like "os.path.join"; # in this case we need to patch "os" and "os.path" for i in range(len(UpperCamelCase ) ): try: __UpperCAmelCase : List[Any] = import_module(""".""".join(submodules[: i + 1] ) ) except ModuleNotFoundError: continue # We iterate over all the globals in self.obj in case we find "os" or "os.path" for attr in self.obj.__dir__(): __UpperCAmelCase : List[Any] = getattr(self.obj , UpperCamelCase ) # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". # This allows to patch renamed modules like "from os import path as ospath". if obj_attr is submodule or ( (isinstance(UpperCamelCase , _PatchedModuleObj ) and obj_attr._original_module is submodule) ): __UpperCAmelCase : Tuple = obj_attr # patch at top level setattr(self.obj , UpperCamelCase , _PatchedModuleObj(UpperCamelCase , attrs=self.attrs ) ) __UpperCAmelCase : int = getattr(self.obj , UpperCamelCase ) # construct lower levels patches for key in submodules[i + 1 :]: setattr(UpperCamelCase , UpperCamelCase , _PatchedModuleObj(getattr(UpperCamelCase , UpperCamelCase , UpperCamelCase ) , attrs=self.attrs ) ) __UpperCAmelCase : Optional[int] = getattr(UpperCamelCase , UpperCamelCase ) # finally set the target attribute setattr(UpperCamelCase , UpperCamelCase , self.new ) # Patch attribute itself: # it's used for builtins like "open", # and also to patch "os.path.join" we may also need to patch "join" # itself if it was imported as "from os.path import join". if submodules: # if it's an attribute of a submodule like "os.path.join" try: __UpperCAmelCase : int = getattr(import_module(""".""".join(UpperCamelCase ) ) , UpperCamelCase ) except (AttributeError, ModuleNotFoundError): return # We iterate over all the globals in self.obj in case we find "os.path.join" for attr in self.obj.__dir__(): # We don't check for the name of the global, but rather if its value *is* "os.path.join". # This allows to patch renamed attributes like "from os.path import join as pjoin". if getattr(self.obj , UpperCamelCase ) is attr_value: __UpperCAmelCase : Union[str, Any] = getattr(self.obj , UpperCamelCase ) setattr(self.obj , UpperCamelCase , self.new ) elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open" __UpperCAmelCase : str = globals()["""__builtins__"""][target_attr] setattr(self.obj , UpperCamelCase , self.new ) else: raise RuntimeError(f'''Tried to patch attribute {target_attr} instead of a submodule.''' ) def __exit__( self : str , *UpperCamelCase : Optional[int] ): '''simple docstring''' for attr in list(self.original ): setattr(self.obj , UpperCamelCase , self.original.pop(UpperCamelCase ) ) def lowerCamelCase__ ( self : int ): '''simple docstring''' self.__enter__() self._active_patches.append(self ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' try: self._active_patches.remove(self ) except ValueError: # If the patch hasn't been started this will fail return None return self.__exit__()
115
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a_ = { 'configuration_jukebox': [ 'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'JukeboxConfig', 'JukeboxPriorConfig', 'JukeboxVQVAEConfig', ], 'tokenization_jukebox': ['JukeboxTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ 'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST', 'JukeboxModel', 'JukeboxPreTrainedModel', 'JukeboxVQVAE', 'JukeboxPrior', ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
368
'''simple docstring''' import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a_ = logging.get_logger(__name__) a_ = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } a_ = { 'vocab_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json' }, 'merges_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt' }, 'tokenizer_config_file': { 'facebook/blenderbot_small-90M': ( 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json' ) }, } a_ = {'facebook/blenderbot_small-90M': 5_1_2} def _a( UpperCamelCase__ : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] =set() SCREAMING_SNAKE_CASE__ : Optional[Any] =word[0] for char in word[1:]: pairs.add((prev_char, char) ) SCREAMING_SNAKE_CASE__ : Optional[Any] =char SCREAMING_SNAKE_CASE__ : Any =set(UpperCamelCase__ ) return pairs class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = ["""input_ids""", """attention_mask"""] def __init__( self : Tuple , __lowercase : int , __lowercase : Optional[int] , __lowercase : List[str]="__start__" , __lowercase : Union[str, Any]="__end__" , __lowercase : str="__unk__" , __lowercase : Union[str, Any]="__null__" , **__lowercase : List[str] , ) -> Optional[int]: super().__init__(unk_token=__lowercase , bos_token=__lowercase , eos_token=__lowercase , pad_token=__lowercase , **__lowercase ) with open(__lowercase , encoding='''utf-8''' ) as vocab_handle: SCREAMING_SNAKE_CASE__ : Any =json.load(__lowercase ) SCREAMING_SNAKE_CASE__ : Dict ={v: k for k, v in self.encoder.items()} with open(__lowercase , encoding='''utf-8''' ) as merges_handle: SCREAMING_SNAKE_CASE__ : int =merges_handle.read().split('''\n''' )[1:-1] SCREAMING_SNAKE_CASE__ : List[Any] =[tuple(merge.split() ) for merge in merges] SCREAMING_SNAKE_CASE__ : int =dict(zip(__lowercase , range(len(__lowercase ) ) ) ) SCREAMING_SNAKE_CASE__ : Dict ={} @property def __magic_name__ ( self : Any ) -> int: return len(self.encoder ) def __magic_name__ ( self : Tuple ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def __magic_name__ ( self : Optional[Any] , __lowercase : str ) -> str: if token in self.cache: return self.cache[token] SCREAMING_SNAKE_CASE__ : Union[str, Any] =re.sub('''([.,!?()])''' , r''' \1''' , __lowercase ) SCREAMING_SNAKE_CASE__ : int =re.sub('''(\')''' , r''' \1 ''' , __lowercase ) SCREAMING_SNAKE_CASE__ : Dict =re.sub(r'''\s{2,}''' , ''' ''' , __lowercase ) if "\n" in token: SCREAMING_SNAKE_CASE__ : List[str] =token.replace('''\n''' , ''' __newln__''' ) SCREAMING_SNAKE_CASE__ : Dict =token.split(''' ''' ) SCREAMING_SNAKE_CASE__ : Dict =[] for token in tokens: if not len(__lowercase ): continue SCREAMING_SNAKE_CASE__ : Union[str, Any] =token.lower() SCREAMING_SNAKE_CASE__ : Optional[Any] =tuple(__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) SCREAMING_SNAKE_CASE__ : Optional[Any] =get_pairs(__lowercase ) if not pairs: words.append(__lowercase ) continue while True: SCREAMING_SNAKE_CASE__ : int =min(__lowercase , key=lambda __lowercase : self.bpe_ranks.get(__lowercase , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =bigram SCREAMING_SNAKE_CASE__ : List[Any] =[] SCREAMING_SNAKE_CASE__ : Tuple =0 while i < len(__lowercase ): try: SCREAMING_SNAKE_CASE__ : Optional[Any] =word.index(__lowercase , __lowercase ) new_word.extend(word[i:j] ) SCREAMING_SNAKE_CASE__ : Optional[int] =j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(__lowercase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 SCREAMING_SNAKE_CASE__ : int =tuple(__lowercase ) SCREAMING_SNAKE_CASE__ : int =new_word if len(__lowercase ) == 1: break else: SCREAMING_SNAKE_CASE__ : Any =get_pairs(__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] ='''@@ '''.join(__lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] =word[:-4] SCREAMING_SNAKE_CASE__ : str =word words.append(__lowercase ) return " ".join(__lowercase ) def __magic_name__ ( self : str , __lowercase : str ) -> List[str]: SCREAMING_SNAKE_CASE__ : Dict =[] SCREAMING_SNAKE_CASE__ : List[Any] =re.findall(r'''\S+\n?''' , __lowercase ) for token in words: split_tokens.extend(list(self.bpe(__lowercase ).split(''' ''' ) ) ) return split_tokens def __magic_name__ ( self : List[Any] , __lowercase : str ) -> int: SCREAMING_SNAKE_CASE__ : int =token.lower() return self.encoder.get(__lowercase , self.encoder.get(self.unk_token ) ) def __magic_name__ ( self : int , __lowercase : int ) -> str: return self.decoder.get(__lowercase , self.unk_token ) def __magic_name__ ( self : List[Any] , __lowercase : List[str] ) -> str: SCREAMING_SNAKE_CASE__ : Optional[int] =''' '''.join(__lowercase ).replace('''@@ ''' , '''''' ).strip() return out_string def __magic_name__ ( self : Tuple , __lowercase : str , __lowercase : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__lowercase ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return SCREAMING_SNAKE_CASE__ : str =os.path.join( __lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) SCREAMING_SNAKE_CASE__ : int =os.path.join( __lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(__lowercase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowercase , ensure_ascii=__lowercase ) + '''\n''' ) SCREAMING_SNAKE_CASE__ : List[str] =0 with open(__lowercase , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowercase : kv[1] ): if index != token_index: logger.warning( F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." ''' Please check that the tokenizer is not corrupted!''' ) SCREAMING_SNAKE_CASE__ : Any =token_index writer.write(''' '''.join(__lowercase ) + '''\n''' ) index += 1 return vocab_file, merge_file
222
0
from __future__ import annotations def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]: # noqa: E741 while r - l > 1: __lowerCamelCase : int = (l + r) // 2 if v[m] >= key: __lowerCamelCase : str = m else: __lowerCamelCase : Tuple = m # noqa: E741 return r def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> int: if len(lowerCamelCase__ ) == 0: return 0 __lowerCamelCase : Union[str, Any] = [0] * len(lowerCamelCase__ ) __lowerCamelCase : Optional[int] = 1 __lowerCamelCase : str = v[0] for i in range(1 , len(lowerCamelCase__ ) ): if v[i] < tail[0]: __lowerCamelCase : Optional[Any] = v[i] elif v[i] > tail[length - 1]: __lowerCamelCase : List[str] = v[i] length += 1 else: __lowerCamelCase : str = v[i] return length if __name__ == "__main__": import doctest doctest.testmod()
73
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) a__ : Union[str, Any] = {'configuration_mbart': ['MBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MBartConfig', 'MBartOnnxConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : int = ['MBartTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : List[Any] = ['MBartTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : List[str] = [ 'MBART_PRETRAINED_MODEL_ARCHIVE_LIST', 'MBartForCausalLM', 'MBartForConditionalGeneration', 'MBartForQuestionAnswering', 'MBartForSequenceClassification', 'MBartModel', 'MBartPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : List[str] = [ 'TFMBartForConditionalGeneration', 'TFMBartModel', 'TFMBartPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : str = [ 'FlaxMBartForConditionalGeneration', 'FlaxMBartForQuestionAnswering', 'FlaxMBartForSequenceClassification', 'FlaxMBartModel', 'FlaxMBartPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart import MBartTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart_fast import MBartTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mbart import ( MBART_PRETRAINED_MODEL_ARCHIVE_LIST, MBartForCausalLM, MBartForConditionalGeneration, MBartForQuestionAnswering, MBartForSequenceClassification, MBartModel, MBartPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mbart import ( FlaxMBartForConditionalGeneration, FlaxMBartForQuestionAnswering, FlaxMBartForSequenceClassification, FlaxMBartModel, FlaxMBartPreTrainedModel, ) else: import sys a__ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
80
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase_ : List[str] = { '''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''], '''tokenization_roformer''': ['''RoFormerTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ : str = ['''RoFormerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ : int = [ '''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''RoFormerForCausalLM''', '''RoFormerForMaskedLM''', '''RoFormerForMultipleChoice''', '''RoFormerForQuestionAnswering''', '''RoFormerForSequenceClassification''', '''RoFormerForTokenClassification''', '''RoFormerLayer''', '''RoFormerModel''', '''RoFormerPreTrainedModel''', '''load_tf_weights_in_roformer''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ : List[str] = [ '''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFRoFormerForCausalLM''', '''TFRoFormerForMaskedLM''', '''TFRoFormerForMultipleChoice''', '''TFRoFormerForQuestionAnswering''', '''TFRoFormerForSequenceClassification''', '''TFRoFormerForTokenClassification''', '''TFRoFormerLayer''', '''TFRoFormerModel''', '''TFRoFormerPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ : Union[str, Any] = [ '''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FlaxRoFormerForMaskedLM''', '''FlaxRoFormerForMultipleChoice''', '''FlaxRoFormerForQuestionAnswering''', '''FlaxRoFormerForSequenceClassification''', '''FlaxRoFormerForTokenClassification''', '''FlaxRoFormerModel''', '''FlaxRoFormerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig from .tokenization_roformer import RoFormerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roformer_fast import RoFormerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roformer import ( TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roformer import ( FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, FlaxRoFormerPreTrainedModel, ) else: import sys UpperCamelCase_ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
142
'''simple docstring''' from __future__ import annotations from collections.abc import Callable def __a ( _UpperCamelCase: Callable[[int | float], int | float] , _UpperCamelCase: int | float , _UpperCamelCase: int | float , _UpperCamelCase: int = 100 , ) -> float: """simple docstring""" _snake_case = x_start _snake_case = fnc(_UpperCamelCase ) _snake_case = 0.0 for _ in range(_UpperCamelCase ): # Approximates small segments of curve as linear and solve # for trapezoidal area _snake_case = (x_end - x_start) / steps + xa _snake_case = fnc(_UpperCamelCase ) area += abs(fxa + fxa ) * (xa - xa) / 2 # Increment step _snake_case = xa _snake_case = fxa return area if __name__ == "__main__": def __a ( _UpperCamelCase: Any ) -> Optional[int]: """simple docstring""" return x**3 + x**2 print('''f(x) = x^3 + x^2''') print('''The area between the curve, x = -5, x = 5 and the x axis is:''') UpperCamelCase_ : Optional[int] = 10 while i <= 100000: print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}') i *= 10
142
1
'''simple docstring''' import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class __UpperCAmelCase ( _lowerCamelCase ): __lowercase = """char""" __lowercase = """bpe""" __lowercase = """wp""" lowercase : List[Any] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class __UpperCAmelCase ( _lowerCamelCase ): __lowercase = ["""image_processor""", """char_tokenizer"""] __lowercase = """ViTImageProcessor""" __lowercase = """MgpstrTokenizer""" def __init__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ ): """simple docstring""" _snake_case = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , lowerCAmelCase_ , ) _snake_case = kwargs.pop('feature_extractor' ) _snake_case = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) _snake_case = tokenizer _snake_case = AutoTokenizer.from_pretrained('gpt2' ) _snake_case = AutoTokenizer.from_pretrained('bert-base-uncased' ) super().__init__(lowerCAmelCase_ , lowerCAmelCase_ ) def __call__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ ): """simple docstring""" if images is None and text is None: raise ValueError('You need to specify either an `images` or `text` input to process.' ) if images is not None: _snake_case = self.image_processor(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ ) if text is not None: _snake_case = self.char_tokenizer(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ ) if text is None: return inputs elif images is None: return encodings else: _snake_case = encodings['input_ids'] return inputs def lowerCamelCase ( self , lowerCAmelCase_ ): """simple docstring""" _snake_case , _snake_case , _snake_case = sequences _snake_case = char_preds.size(0 ) _snake_case , _snake_case = self._decode_helper(lowerCAmelCase_ , 'char' ) _snake_case , _snake_case = self._decode_helper(lowerCAmelCase_ , 'bpe' ) _snake_case , _snake_case = self._decode_helper(lowerCAmelCase_ , 'wp' ) _snake_case = [] _snake_case = [] for i in range(lowerCAmelCase_ ): _snake_case = [char_scores[i], bpe_scores[i], wp_scores[i]] _snake_case = [char_strs[i], bpe_strs[i], wp_strs[i]] _snake_case = scores.index(max(lowerCAmelCase_ ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) _snake_case = {} _snake_case = final_strs _snake_case = final_scores _snake_case = char_strs _snake_case = bpe_strs _snake_case = wp_strs return out def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" if format == DecodeType.CHARACTER: _snake_case = self.char_decode _snake_case = 1 _snake_case = '[s]' elif format == DecodeType.BPE: _snake_case = self.bpe_decode _snake_case = 2 _snake_case = '#' elif format == DecodeType.WORDPIECE: _snake_case = self.wp_decode _snake_case = 1_02 _snake_case = '[SEP]' else: raise ValueError(F'Format {format} is not supported.' ) _snake_case , _snake_case = [], [] _snake_case = pred_logits.size(0 ) _snake_case = pred_logits.size(1 ) _snake_case , _snake_case = pred_logits.topk(1 , dim=-1 , largest=lowerCAmelCase_ , sorted=lowerCAmelCase_ ) _snake_case = preds_index.view(-1 , lowerCAmelCase_ )[:, 1:] _snake_case = decoder(lowerCAmelCase_ ) _snake_case , _snake_case = torch.nn.functional.softmax(lowerCAmelCase_ , dim=2 ).max(dim=2 ) _snake_case = preds_max_prob[:, 1:] for index in range(lowerCAmelCase_ ): _snake_case = preds_str[index].find(lowerCAmelCase_ ) _snake_case = preds_str[index][:pred_eos] _snake_case = preds_index[index].cpu().tolist() _snake_case = pred_index.index(lowerCAmelCase_ ) if eos_token in pred_index else -1 _snake_case = preds_max_prob[index][: pred_eos_index + 1] _snake_case = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(lowerCAmelCase_ ) conf_scores.append(lowerCAmelCase_ ) return dec_strs, conf_scores def lowerCamelCase ( self , lowerCAmelCase_ ): """simple docstring""" _snake_case = [seq.replace(' ' , '' ) for seq in self.char_tokenizer.batch_decode(lowerCAmelCase_ )] return decode_strs def lowerCamelCase ( self , lowerCAmelCase_ ): """simple docstring""" return self.bpe_tokenizer.batch_decode(lowerCAmelCase_ ) def lowerCamelCase ( self , lowerCAmelCase_ ): """simple docstring""" _snake_case = [seq.replace(' ' , '' ) for seq in self.wp_tokenizer.batch_decode(lowerCAmelCase_ )] return decode_strs
42
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device if is_torch_available(): from transformers import AutoModelForSeqaSeqLM, AutoTokenizer @require_torch @require_sentencepiece @require_tokenizers class __UpperCAmelCase ( unittest.TestCase ): @slow def lowerCamelCase ( self ): """simple docstring""" _snake_case = AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' , return_dict=lowerCAmelCase_ ).to(lowerCAmelCase_ ) _snake_case = AutoTokenizer.from_pretrained('google/mt5-small' ) _snake_case = tokenizer('Hello there' , return_tensors='pt' ).input_ids _snake_case = tokenizer('Hi I am' , return_tensors='pt' ).input_ids _snake_case = model(input_ids.to(lowerCAmelCase_ ) , labels=labels.to(lowerCAmelCase_ ) ).loss _snake_case = -(labels.shape[-1] * loss.item()) _snake_case = -84.9127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
42
1
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_albert import AlbertTokenizer else: _SCREAMING_SNAKE_CASE : Dict = None _SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE : List[str] = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} _SCREAMING_SNAKE_CASE : str = { """vocab_file""": { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""", }, """tokenizer_file""": { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""", }, } _SCREAMING_SNAKE_CASE : List[str] = { """albert-base-v1""": 5_1_2, """albert-large-v1""": 5_1_2, """albert-xlarge-v1""": 5_1_2, """albert-xxlarge-v1""": 5_1_2, """albert-base-v2""": 5_1_2, """albert-large-v2""": 5_1_2, """albert-xlarge-v2""": 5_1_2, """albert-xxlarge-v2""": 5_1_2, } _SCREAMING_SNAKE_CASE : Dict = """▁""" class __a ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ = AlbertTokenizer def __init__( self : Optional[Any] , lowercase_ : List[str]=None , lowercase_ : List[str]=None , lowercase_ : int=True , lowercase_ : Tuple=True , lowercase_ : List[str]=False , lowercase_ : List[str]="[CLS]" , lowercase_ : List[Any]="[SEP]" , lowercase_ : int="<unk>" , lowercase_ : int="[SEP]" , lowercase_ : List[str]="<pad>" , lowercase_ : List[Any]="[CLS]" , lowercase_ : Union[str, Any]="[MASK]" , **lowercase_ : Dict , ): UpperCamelCase__ : Tuple =( AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase , normalized=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token ) super().__init__( _UpperCAmelCase , tokenizer_file=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , remove_space=_UpperCAmelCase , keep_accents=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , **_UpperCAmelCase , ) UpperCamelCase__ : str =do_lower_case UpperCamelCase__ : Optional[int] =remove_space UpperCamelCase__ : str =keep_accents UpperCamelCase__ : Any =vocab_file UpperCamelCase__ : Tuple =False if not self.vocab_file else True def _lowerCAmelCase ( self : int , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ): UpperCamelCase__ : Tuple =[self.sep_token_id] UpperCamelCase__ : str =[self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _lowerCAmelCase ( self : List[Any] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ): UpperCamelCase__ : Optional[Any] =[self.sep_token_id] UpperCamelCase__ : Dict =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _lowerCAmelCase ( self : Optional[int] , lowercase_ : str , lowercase_ : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(_UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCamelCase__ : List[str] =os.path.join( _UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ): copyfile(self.vocab_file , _UpperCAmelCase ) return (out_vocab_file,)
367
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE : Union[str, Any] = { """transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""", } class __a ( snake_case__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 'transfo-xl' SCREAMING_SNAKE_CASE_ = ['mems'] SCREAMING_SNAKE_CASE_ = { 'n_token': 'vocab_size', 'hidden_size': 'd_model', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self : Any , lowercase_ : str=26_7735 , lowercase_ : Union[str, Any]=[2_0000, 4_0000, 20_0000] , lowercase_ : Union[str, Any]=1024 , lowercase_ : Tuple=1024 , lowercase_ : int=16 , lowercase_ : str=64 , lowercase_ : Union[str, Any]=4096 , lowercase_ : Dict=4 , lowercase_ : Dict=False , lowercase_ : Dict=18 , lowercase_ : Optional[Any]=1600 , lowercase_ : str=1000 , lowercase_ : List[Any]=True , lowercase_ : Tuple=True , lowercase_ : Any=0 , lowercase_ : Union[str, Any]=-1 , lowercase_ : List[str]=True , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Optional[Any]=0.0 , lowercase_ : List[str]=True , lowercase_ : Optional[int]="normal" , lowercase_ : str=0.0_1 , lowercase_ : Any=0.0_1 , lowercase_ : Union[str, Any]=0.0_2 , lowercase_ : List[str]=1e-5 , lowercase_ : Optional[int]=0 , **lowercase_ : Union[str, Any] , ): UpperCamelCase__ : Union[str, Any] =vocab_size UpperCamelCase__ : Union[str, Any] =[] self.cutoffs.extend(lowercase_ ) if proj_share_all_but_first: UpperCamelCase__ : List[Any] =[False] + [True] * len(self.cutoffs ) else: UpperCamelCase__ : Union[str, Any] =[False] + [False] * len(self.cutoffs ) UpperCamelCase__ : Dict =d_model UpperCamelCase__ : Union[str, Any] =d_embed UpperCamelCase__ : Optional[Any] =d_head UpperCamelCase__ : str =d_inner UpperCamelCase__ : List[Any] =div_val UpperCamelCase__ : Any =pre_lnorm UpperCamelCase__ : List[Any] =n_layer UpperCamelCase__ : List[str] =n_head UpperCamelCase__ : Dict =mem_len UpperCamelCase__ : Optional[Any] =same_length UpperCamelCase__ : Optional[int] =attn_type UpperCamelCase__ : Any =clamp_len UpperCamelCase__ : str =sample_softmax UpperCamelCase__ : Optional[Any] =adaptive UpperCamelCase__ : Tuple =dropout UpperCamelCase__ : Any =dropatt UpperCamelCase__ : Tuple =untie_r UpperCamelCase__ : Optional[int] =init UpperCamelCase__ : Optional[int] =init_range UpperCamelCase__ : str =proj_init_std UpperCamelCase__ : Union[str, Any] =init_std UpperCamelCase__ : Optional[Any] =layer_norm_epsilon super().__init__(eos_token_id=lowercase_ , **lowercase_ ) @property def _lowerCAmelCase ( self : str ): # Message copied from Transformer-XL documentation logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def _lowerCAmelCase ( self : List[Any] , lowercase_ : Optional[Any] ): # Message copied from Transformer-XL documentation raise NotImplementedError( f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
157
0
'''simple docstring''' import unittest from transformers import AutoTokenizer, FalconConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, ) class SCREAMING_SNAKE_CASE: """simple docstring""" def __init__( self : Optional[Any] , __snake_case : str , __snake_case : Tuple=3 , __snake_case : Dict=7 , __snake_case : List[str]=True , __snake_case : List[Any]=True , __snake_case : Any=False , __snake_case : Dict=True , __snake_case : Optional[Any]=99 , __snake_case : List[Any]=32 , __snake_case : List[Any]=5 , __snake_case : int=4 , __snake_case : Optional[Any]=37 , __snake_case : int="gelu" , __snake_case : Dict=0.1 , __snake_case : Dict=0.1 , __snake_case : Union[str, Any]=512 , __snake_case : Optional[int]=16 , __snake_case : List[Any]=2 , __snake_case : int=0.02 , __snake_case : int=3 , __snake_case : Tuple=4 , __snake_case : Tuple=None , ) -> Optional[int]: UpperCAmelCase : str = parent UpperCAmelCase : List[str] = batch_size UpperCAmelCase : str = seq_length UpperCAmelCase : Dict = is_training UpperCAmelCase : Tuple = use_input_mask UpperCAmelCase : Optional[int] = use_token_type_ids UpperCAmelCase : List[Any] = use_labels UpperCAmelCase : Any = vocab_size UpperCAmelCase : Union[str, Any] = hidden_size UpperCAmelCase : Any = num_hidden_layers UpperCAmelCase : int = num_attention_heads UpperCAmelCase : Tuple = intermediate_size UpperCAmelCase : Union[str, Any] = hidden_act UpperCAmelCase : List[str] = hidden_dropout_prob UpperCAmelCase : List[Any] = attention_probs_dropout_prob UpperCAmelCase : List[Any] = max_position_embeddings UpperCAmelCase : Any = type_vocab_size UpperCAmelCase : str = type_sequence_label_size UpperCAmelCase : Any = initializer_range UpperCAmelCase : Any = num_labels UpperCAmelCase : Optional[Any] = num_choices UpperCAmelCase : Any = scope def A ( self : List[Any] ) -> int: UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : Optional[Any] = None if self.use_input_mask: UpperCAmelCase : Any = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase : Union[str, Any] = None UpperCAmelCase : Optional[int] = None UpperCAmelCase : List[str] = None UpperCAmelCase : Union[str, Any] = None if self.use_labels: UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase : str = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase : str = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A ( self : Optional[Any] ) -> List[str]: return FalconConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=__snake_case , ) def A ( self : str , __snake_case : List[Any] , __snake_case : int , __snake_case : Tuple , __snake_case : int , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Optional[int] ) -> Any: UpperCAmelCase : List[Any] = FalconModel(config=__snake_case ) model.to(__snake_case ) model.eval() UpperCAmelCase : List[str] = model(__snake_case , attention_mask=__snake_case ) UpperCAmelCase : int = model(__snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self : Union[str, Any] , __snake_case : Tuple , __snake_case : List[Any] , __snake_case : Any , __snake_case : int , __snake_case : Dict , __snake_case : Any , __snake_case : Any , __snake_case : Optional[Any] , __snake_case : Optional[int] , ) -> List[str]: UpperCAmelCase : str = True UpperCAmelCase : Union[str, Any] = FalconModel(__snake_case ) model.to(__snake_case ) model.eval() UpperCAmelCase : List[str] = model( __snake_case , attention_mask=__snake_case , encoder_hidden_states=__snake_case , encoder_attention_mask=__snake_case , ) UpperCAmelCase : int = model( __snake_case , attention_mask=__snake_case , encoder_hidden_states=__snake_case , ) UpperCAmelCase : Any = model(__snake_case , attention_mask=__snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self : Optional[Any] , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : Tuple , __snake_case : Tuple , __snake_case : int , __snake_case : str , __snake_case : str , __snake_case : Optional[int] , ) -> Optional[int]: UpperCAmelCase : List[Any] = FalconForCausalLM(config=__snake_case ) model.to(__snake_case ) model.eval() UpperCAmelCase : int = model(__snake_case , attention_mask=__snake_case , labels=__snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self : Union[str, Any] , __snake_case : Tuple , __snake_case : Tuple , __snake_case : Tuple , __snake_case : List[Any] , __snake_case : Optional[Any] , __snake_case : str , __snake_case : Optional[Any] , __snake_case : str , __snake_case : str , ) -> int: UpperCAmelCase : List[Any] = True UpperCAmelCase : Union[str, Any] = True UpperCAmelCase : Tuple = FalconForCausalLM(config=__snake_case ) model.to(__snake_case ) model.eval() # first forward pass UpperCAmelCase : List[str] = model( __snake_case , attention_mask=__snake_case , encoder_hidden_states=__snake_case , encoder_attention_mask=__snake_case , use_cache=__snake_case , ) UpperCAmelCase : str = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase : str = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase : int = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCAmelCase : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase : str = torch.cat([input_mask, next_mask] , dim=-1 ) UpperCAmelCase : Dict = model( __snake_case , attention_mask=__snake_case , encoder_hidden_states=__snake_case , encoder_attention_mask=__snake_case , output_hidden_states=__snake_case , )['''hidden_states'''][0] UpperCAmelCase : Tuple = model( __snake_case , attention_mask=__snake_case , encoder_hidden_states=__snake_case , encoder_attention_mask=__snake_case , past_key_values=__snake_case , output_hidden_states=__snake_case , )['''hidden_states'''][0] # select random slice UpperCAmelCase : int = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCAmelCase : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase : List[Any] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1E-3 ) ) def A ( self : int ) -> Tuple: UpperCAmelCase : Tuple = self.prepare_config_and_inputs() ( ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ) : Optional[Any] = config_and_inputs UpperCAmelCase : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE( A__ , A__ , A__ , unittest.TestCase ): """simple docstring""" lowerCamelCase__ = ( ( FalconModel, FalconForCausalLM, FalconForSequenceClassification, FalconForTokenClassification, FalconForQuestionAnswering, ) if is_torch_available() else () ) lowerCamelCase__ = (FalconForCausalLM,) if is_torch_available() else () lowerCamelCase__ = ( { """feature-extraction""": FalconModel, """text-classification""": FalconForSequenceClassification, """text-generation""": FalconForCausalLM, """question-answering""": FalconForQuestionAnswering, """token-classification""": FalconForTokenClassification, """zero-shot""": FalconForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False def A ( self : str ) -> Optional[Any]: UpperCAmelCase : Dict = FalconModelTester(self ) UpperCAmelCase : Any = ConfigTester(self , config_class=__snake_case , hidden_size=37 ) def A ( self : int ) -> Any: self.config_tester.run_common_tests() def A ( self : List[str] ) -> Union[str, Any]: UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__snake_case ) def A ( self : List[str] ) -> List[str]: UpperCAmelCase , *UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() for alibi in [True, False]: UpperCAmelCase : Optional[Any] = alibi self.model_tester.create_and_check_model(__snake_case , *__snake_case ) def A ( self : int ) -> Dict: UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Tuple = 3 UpperCAmelCase : Union[str, Any] = input_dict['''input_ids'''] UpperCAmelCase : Any = input_ids.ne(1 ).to(__snake_case ) UpperCAmelCase : Optional[int] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCAmelCase : Tuple = FalconForSequenceClassification(__snake_case ) model.to(__snake_case ) model.eval() UpperCAmelCase : Optional[Any] = model(__snake_case , attention_mask=__snake_case , labels=__snake_case ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def A ( self : Union[str, Any] ) -> List[Any]: UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Union[str, Any] = 3 UpperCAmelCase : Tuple = '''single_label_classification''' UpperCAmelCase : Union[str, Any] = input_dict['''input_ids'''] UpperCAmelCase : Dict = input_ids.ne(1 ).to(__snake_case ) UpperCAmelCase : int = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCAmelCase : Tuple = FalconForSequenceClassification(__snake_case ) model.to(__snake_case ) model.eval() UpperCAmelCase : List[str] = model(__snake_case , attention_mask=__snake_case , labels=__snake_case ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def A ( self : Optional[Any] ) -> Dict: UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : List[Any] = input_dict['''input_ids'''] UpperCAmelCase : Tuple = FalconForCausalLM(__snake_case ) model.to(__snake_case ) model.eval() UpperCAmelCase : Dict = model(__snake_case , use_cache=__snake_case ) UpperCAmelCase : Tuple = input_ids.shape[0] UpperCAmelCase : Any = model._convert_to_rw_cache(result.past_key_values ) UpperCAmelCase : Any = model._convert_cache_to_standard_format(__snake_case , __snake_case ) for layer in range(len(__snake_case ) ): for tensor_idx in range(2 ): self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 ) self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 ) self.assertTrue( torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) ) def A ( self : Optional[Any] ) -> Optional[int]: UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Union[str, Any] = 3 UpperCAmelCase : List[Any] = '''multi_label_classification''' UpperCAmelCase : Tuple = input_dict['''input_ids'''] UpperCAmelCase : List[Any] = input_ids.ne(1 ).to(__snake_case ) UpperCAmelCase : Optional[Any] = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) UpperCAmelCase : str = FalconForSequenceClassification(__snake_case ) model.to(__snake_case ) model.eval() UpperCAmelCase : Dict = model(__snake_case , attention_mask=__snake_case , labels=__snake_case ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def A ( self : List[str] ) -> Tuple: # Falcon can have different numbers of KV-heads than the number of query heads, so we need # to override this test to use the right head counts. for model_class in self.all_generative_model_classes: UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() # If it doesn't support cache, pass the test if not hasattr(__snake_case , '''use_cache''' ): return UpperCAmelCase : List[str] = model_class(__snake_case ).to(__snake_case ) if "use_cache" not in inputs: UpperCAmelCase : Optional[Any] = True UpperCAmelCase : Optional[int] = model(**__snake_case ) # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format) if "past_key_values" not in outputs: return UpperCAmelCase : List[Any] = ( getattr(__snake_case , '''decoder_layers''' , __snake_case ) or getattr(__snake_case , '''num_decoder_layers''' , __snake_case ) or config.num_hidden_layers ) UpperCAmelCase : Any = getattr(__snake_case , '''num_kv_heads''' , config.num_attention_heads ) UpperCAmelCase : Optional[Any] = getattr(__snake_case , '''d_model''' , config.hidden_size ) UpperCAmelCase : Union[str, Any] = embed_dim // num_attention_heads UpperCAmelCase : List[str] = outputs['''past_key_values'''] self.assertEqual(len(__snake_case ) , __snake_case ) UpperCAmelCase , UpperCAmelCase : List[Any] = inputs['''input_ids'''].shape for i in range(__snake_case ): if config.new_decoder_architecture: UpperCAmelCase : Tuple = config.num_attention_heads elif config.multi_query: UpperCAmelCase : List[Any] = 1 self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2 self.assertEqual( past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) self.assertEqual( past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) @require_torch class SCREAMING_SNAKE_CASE( unittest.TestCase ): """simple docstring""" @slow def A ( self : Any ) -> Tuple: UpperCAmelCase : Dict = AutoTokenizer.from_pretrained('''Rocketknight1/falcon-rw-1b''' ) UpperCAmelCase : List[str] = FalconForCausalLM.from_pretrained('''Rocketknight1/falcon-rw-1b''' ) model.eval() model.to(__snake_case ) UpperCAmelCase : int = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(__snake_case ) UpperCAmelCase : List[Any] = ( '''My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.''' ) UpperCAmelCase : List[str] = model.generate(**__snake_case , do_sample=__snake_case , max_new_tokens=19 ) UpperCAmelCase : str = tokenizer.batch_decode(__snake_case )[0] self.assertEqual(__snake_case , __snake_case ) @slow def A ( self : Tuple ) -> List[Any]: # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]: UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained(__snake_case ) UpperCAmelCase : List[Any] = FalconForCausalLM.from_pretrained(__snake_case ) model.eval() model.to(__snake_case ) UpperCAmelCase : List[str] = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(__snake_case ) # We just test that these run without errors - the models are randomly initialized # and so the actual text outputs will be garbage model.generate(**__snake_case , do_sample=__snake_case , max_new_tokens=4 ) model.generate(**__snake_case , do_sample=__snake_case , max_new_tokens=4 ) model.generate(**__snake_case , num_beams=2 , max_new_tokens=4 ) @slow def A ( self : str ) -> Optional[int]: # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers with torch.no_grad(): for repo in [ "Rocketknight1/falcon-rw-1b", "Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b", ]: UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained(__snake_case ) UpperCAmelCase : Union[str, Any] = FalconForCausalLM.from_pretrained(__snake_case ) model.eval() model.to(device=__snake_case ) UpperCAmelCase : int = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(__snake_case ) # Test results are the same with and without cache UpperCAmelCase : Dict = model.generate(**__snake_case , do_sample=__snake_case , max_new_tokens=20 , use_cache=__snake_case ) UpperCAmelCase : Tuple = model.generate(**__snake_case , do_sample=__snake_case , max_new_tokens=20 , use_cache=__snake_case ) self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
23
'''simple docstring''' import os import tempfile import unittest import numpy as np from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline @require_flax class SCREAMING_SNAKE_CASE( unittest.TestCase ): """simple docstring""" def A ( self : str ) -> int: with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights UpperCAmelCase : Optional[Any] = FlaxDiffusionPipeline.from_pretrained( '''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=__snake_case , cache_dir=__snake_case ) UpperCAmelCase : str = [t[-1] for t in os.walk(os.path.join(__snake_case , os.listdir(__snake_case )[0] , '''snapshots''' ) )] UpperCAmelCase : str = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a PyTorch file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin assert not any(f.endswith('''.bin''' ) for f in files ) @slow @require_flax class SCREAMING_SNAKE_CASE( unittest.TestCase ): """simple docstring""" def A ( self : List[str] ) -> Dict: UpperCAmelCase , UpperCAmelCase : str = FlaxStableDiffusionPipeline.from_pretrained( '''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=__snake_case ) UpperCAmelCase : List[Any] = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) UpperCAmelCase : List[str] = jax.random.PRNGKey(0 ) UpperCAmelCase : Optional[Any] = 4 UpperCAmelCase : Optional[Any] = jax.device_count() UpperCAmelCase : Tuple = num_samples * [prompt] UpperCAmelCase : int = pipeline.prepare_inputs(__snake_case ) # shard inputs and rng UpperCAmelCase : Tuple = replicate(__snake_case ) UpperCAmelCase : Any = jax.random.split(__snake_case , __snake_case ) UpperCAmelCase : Optional[Any] = shard(__snake_case ) UpperCAmelCase : Optional[int] = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images assert images.shape == (num_samples, 1, 64, 64, 3) if jax.device_count() == 8: assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_51_47_45 ) < 1E-3 assert np.abs(np.abs(__snake_case , dtype=np.floataa ).sum() - 4_99_47.8_75 ) < 5E-1 UpperCAmelCase : Union[str, Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) ) assert len(__snake_case ) == num_samples def A ( self : List[Any] ) -> List[str]: UpperCAmelCase , UpperCAmelCase : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''flax''' , safety_checker=__snake_case ) UpperCAmelCase : Dict = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) UpperCAmelCase : Optional[Any] = jax.random.PRNGKey(0 ) UpperCAmelCase : Any = 50 UpperCAmelCase : Union[str, Any] = jax.device_count() UpperCAmelCase : int = num_samples * [prompt] UpperCAmelCase : Union[str, Any] = pipeline.prepare_inputs(__snake_case ) # shard inputs and rng UpperCAmelCase : Dict = replicate(__snake_case ) UpperCAmelCase : int = jax.random.split(__snake_case , __snake_case ) UpperCAmelCase : Tuple = shard(__snake_case ) UpperCAmelCase : Tuple = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_65_24_01) ) < 1E-3 assert np.abs((np.abs(__snake_case , dtype=np.floataa ).sum() - 2_38_38_08.2) ) < 5E-1 def A ( self : int ) -> Dict: UpperCAmelCase , UpperCAmelCase : List[str] = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__snake_case ) UpperCAmelCase : Dict = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) UpperCAmelCase : Union[str, Any] = jax.random.PRNGKey(0 ) UpperCAmelCase : List[str] = 50 UpperCAmelCase : Union[str, Any] = jax.device_count() UpperCAmelCase : List[Any] = num_samples * [prompt] UpperCAmelCase : int = pipeline.prepare_inputs(__snake_case ) # shard inputs and rng UpperCAmelCase : Tuple = replicate(__snake_case ) UpperCAmelCase : List[Any] = jax.random.split(__snake_case , __snake_case ) UpperCAmelCase : Optional[int] = shard(__snake_case ) UpperCAmelCase : Optional[Any] = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3 assert np.abs((np.abs(__snake_case , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1 def A ( self : int ) -> Any: UpperCAmelCase , UpperCAmelCase : Dict = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa ) UpperCAmelCase : List[str] = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) UpperCAmelCase : List[str] = jax.random.PRNGKey(0 ) UpperCAmelCase : Union[str, Any] = 50 UpperCAmelCase : Optional[int] = jax.device_count() UpperCAmelCase : List[str] = num_samples * [prompt] UpperCAmelCase : Dict = pipeline.prepare_inputs(__snake_case ) # shard inputs and rng UpperCAmelCase : Tuple = replicate(__snake_case ) UpperCAmelCase : Any = jax.random.split(__snake_case , __snake_case ) UpperCAmelCase : str = shard(__snake_case ) UpperCAmelCase : Optional[int] = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3 assert np.abs((np.abs(__snake_case , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1 def A ( self : Tuple ) -> Optional[Any]: UpperCAmelCase : int = FlaxDDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , set_alpha_to_one=__snake_case , steps_offset=1 , ) UpperCAmelCase , UpperCAmelCase : Tuple = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , scheduler=__snake_case , safety_checker=__snake_case , ) UpperCAmelCase : Tuple = scheduler.create_state() UpperCAmelCase : Dict = scheduler_state UpperCAmelCase : str = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) UpperCAmelCase : int = jax.random.PRNGKey(0 ) UpperCAmelCase : Union[str, Any] = 50 UpperCAmelCase : Optional[Any] = jax.device_count() UpperCAmelCase : Any = num_samples * [prompt] UpperCAmelCase : Dict = pipeline.prepare_inputs(__snake_case ) # shard inputs and rng UpperCAmelCase : str = replicate(__snake_case ) UpperCAmelCase : List[str] = jax.random.split(__snake_case , __snake_case ) UpperCAmelCase : Optional[int] = shard(__snake_case ) UpperCAmelCase : Dict = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_45_04_39_45) ) < 1E-3 assert np.abs((np.abs(__snake_case , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5E-1 def A ( self : Any ) -> Tuple: UpperCAmelCase : List[Any] = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) UpperCAmelCase : Union[str, Any] = jax.device_count() UpperCAmelCase : List[Any] = num_samples * [prompt] UpperCAmelCase : str = jax.random.split(jax.random.PRNGKey(0 ) , __snake_case ) UpperCAmelCase , UpperCAmelCase : Tuple = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__snake_case , ) UpperCAmelCase : Dict = replicate(__snake_case ) UpperCAmelCase : Optional[Any] = pipeline.prepare_inputs(__snake_case ) UpperCAmelCase : List[str] = shard(__snake_case ) UpperCAmelCase : Any = pipeline(__snake_case , __snake_case , __snake_case , jit=__snake_case ).images assert images.shape == (num_samples, 1, 512, 512, 3) UpperCAmelCase : Optional[int] = images[2, 0, 256, 10:17, 1] # With memory efficient attention UpperCAmelCase , UpperCAmelCase : Any = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__snake_case , use_memory_efficient_attention=__snake_case , ) UpperCAmelCase : int = replicate(__snake_case ) UpperCAmelCase : int = pipeline.prepare_inputs(__snake_case ) UpperCAmelCase : List[Any] = shard(__snake_case ) UpperCAmelCase : Optional[Any] = pipeline(__snake_case , __snake_case , __snake_case , jit=__snake_case ).images assert images_eff.shape == (num_samples, 1, 512, 512, 3) UpperCAmelCase : int = images[2, 0, 256, 10:17, 1] # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum` # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now. assert abs(slice_eff - slice ).max() < 1E-2
23
1
"""simple docstring""" from pathlib import Path import fire def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : str , snake_case : int )-> List[Any]: '''simple docstring''' UpperCAmelCase__ : List[Any] = Path(snake_case ) UpperCAmelCase__ : str = Path(snake_case ) dest_dir.mkdir(exist_ok=snake_case ) for path in src_dir.iterdir(): UpperCAmelCase__ : List[str] = [x.rstrip() for x in list(path.open().readlines() )][:n] UpperCAmelCase__ : Tuple = dest_dir.joinpath(path.name ) print(snake_case ) dest_path.open("w" ).write("\n".join(snake_case ) ) if __name__ == "__main__": fire.Fire(minify)
298
"""simple docstring""" from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : Dict = logging.get_logger(__name__) _lowerCAmelCase : Union[str, Any] = { """snap-research/efficientformer-l1-300""": ( """https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json""" ), } class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ ='''efficientformer''' def __init__( self : List[Any] , snake_case__ : List[int] = [3, 2, 6, 4] , snake_case__ : List[int] = [4_8, 9_6, 2_2_4, 4_4_8] , snake_case__ : List[bool] = [True, True, True, True] , snake_case__ : int = 4_4_8 , snake_case__ : int = 3_2 , snake_case__ : int = 4 , snake_case__ : int = 7 , snake_case__ : int = 5 , snake_case__ : int = 8 , snake_case__ : int = 4 , snake_case__ : float = 0.0 , snake_case__ : int = 1_6 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 2 , snake_case__ : int = 1 , snake_case__ : float = 0.0 , snake_case__ : int = 1 , snake_case__ : bool = True , snake_case__ : bool = True , snake_case__ : float = 1e-5 , snake_case__ : str = "gelu" , snake_case__ : float = 0.02 , snake_case__ : float = 1e-12 , snake_case__ : int = 2_2_4 , snake_case__ : float = 1e-05 , **snake_case__ : str , ): '''simple docstring''' super().__init__(**snake_case__ ) UpperCAmelCase__ : int = hidden_act UpperCAmelCase__ : Optional[int] = hidden_dropout_prob UpperCAmelCase__ : List[str] = hidden_sizes UpperCAmelCase__ : Union[str, Any] = num_hidden_layers UpperCAmelCase__ : int = num_attention_heads UpperCAmelCase__ : List[Any] = initializer_range UpperCAmelCase__ : List[Any] = layer_norm_eps UpperCAmelCase__ : Optional[int] = patch_size UpperCAmelCase__ : Tuple = num_channels UpperCAmelCase__ : Optional[int] = depths UpperCAmelCase__ : Union[str, Any] = mlp_expansion_ratio UpperCAmelCase__ : Dict = downsamples UpperCAmelCase__ : Any = dim UpperCAmelCase__ : str = key_dim UpperCAmelCase__ : List[Any] = attention_ratio UpperCAmelCase__ : Optional[Any] = resolution UpperCAmelCase__ : Optional[Any] = pool_size UpperCAmelCase__ : Any = downsample_patch_size UpperCAmelCase__ : int = downsample_stride UpperCAmelCase__ : Dict = downsample_pad UpperCAmelCase__ : List[Any] = drop_path_rate UpperCAmelCase__ : Optional[Any] = num_metaad_blocks UpperCAmelCase__ : List[str] = distillation UpperCAmelCase__ : Dict = use_layer_scale UpperCAmelCase__ : List[Any] = layer_scale_init_value UpperCAmelCase__ : Optional[Any] = image_size UpperCAmelCase__ : Optional[int] = batch_norm_eps
298
1
"""simple docstring""" class _UpperCAmelCase : def __init__( self :List[Any] , __UpperCamelCase :int ): A = size A = [0] * size A = [0] * size @staticmethod def lowerCamelCase ( __UpperCamelCase :int ): return index | (index + 1) @staticmethod def lowerCamelCase ( __UpperCamelCase :int ): return (index & (index + 1)) - 1 def lowerCamelCase ( self :Tuple , __UpperCamelCase :int , __UpperCamelCase :int ): A = value while index < self.size: A = self.get_prev(__UpperCamelCase ) + 1 if current_left_border == index: A = value else: A = max(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) A = self.get_next(__UpperCamelCase ) def lowerCamelCase ( self :List[Any] , __UpperCamelCase :int , __UpperCamelCase :int ): right -= 1 # Because of right is exclusive A = 0 while left <= right: A = self.get_prev(__UpperCamelCase ) if left <= current_left: A = max(__UpperCamelCase , self.tree[right] ) A = current_left else: A = max(__UpperCamelCase , self.arr[right] ) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
292
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case : Optional[int] = logging.get_logger(__name__) _snake_case : Optional[int] = { 'google/vivit-b-16x2-kinetics400': ( 'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json' ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = '''vivit''' def __init__( self :Optional[Any] , __UpperCamelCase :Dict=2_24 , __UpperCamelCase :int=32 , __UpperCamelCase :Union[str, Any]=[2, 16, 16] , __UpperCamelCase :Optional[Any]=3 , __UpperCamelCase :Optional[Any]=7_68 , __UpperCamelCase :Any=12 , __UpperCamelCase :List[str]=12 , __UpperCamelCase :List[str]=30_72 , __UpperCamelCase :Any="gelu_fast" , __UpperCamelCase :List[Any]=0.0 , __UpperCamelCase :str=0.0 , __UpperCamelCase :Dict=0.02 , __UpperCamelCase :Optional[Any]=1e-06 , __UpperCamelCase :Dict=True , **__UpperCamelCase :Tuple , ): A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = initializer_range A = layer_norm_eps A = image_size A = num_frames A = tubelet_size A = num_channels A = qkv_bias super().__init__(**__UpperCamelCase )
292
1
'''simple docstring''' import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" _snake_case : Optional[Any] = DebertaTokenizer _snake_case : Union[str, Any] = True _snake_case : List[str] = DebertaTokenizerFast def snake_case__ ( self : List[str] ) -> Optional[Any]: '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _UpperCamelCase = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''[UNK]''', ] _UpperCamelCase = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) ) _UpperCamelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] _UpperCamelCase = {'''unk_token''': '''[UNK]'''} _UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(lowerCAmelCase__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(lowerCAmelCase__ ) ) def snake_case__ ( self : Optional[int] , **lowerCAmelCase__ : List[Any] ) -> Tuple: '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def snake_case__ ( self : str , lowerCAmelCase__ : str ) -> List[Any]: '''simple docstring''' _UpperCamelCase = '''lower newer''' _UpperCamelCase = '''lower newer''' return input_text, output_text def snake_case__ ( self : str ) -> List[str]: '''simple docstring''' _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = '''lower newer''' _UpperCamelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] _UpperCamelCase = tokenizer.tokenize(lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase = tokens + [tokenizer.unk_token] _UpperCamelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ ) def snake_case__ ( self : int ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = tokenizer('''Hello''' , '''World''' ) _UpperCamelCase = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd['''token_type_ids'''] , lowerCAmelCase__ ) @slow def snake_case__ ( self : List[Any] ) -> int: '''simple docstring''' _UpperCamelCase = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) _UpperCamelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=lowerCAmelCase__ ) _UpperCamelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowerCAmelCase__ ) _UpperCamelCase = tokenizer.encode( '''sequence builders''' , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ ) _UpperCamelCase = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ ) _UpperCamelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ ) _UpperCamelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def snake_case__ ( self : int ) -> List[Any]: '''simple docstring''' _UpperCamelCase = [self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: _UpperCamelCase = tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) _UpperCamelCase = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] _UpperCamelCase = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ ) _UpperCamelCase = [tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) for seq in encoding['''input_ids''']] # fmt: off _UpperCamelCase = { '''input_ids''': [ [1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2] ], '''token_type_ids''': [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on _UpperCamelCase = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] self.assertDictEqual(encoding.data , lowerCAmelCase__ ) for expected, decoded in zip(lowerCAmelCase__ , lowerCAmelCase__ ): self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
287
'''simple docstring''' import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : Tuple , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Union[str, Any]=13 , lowerCAmelCase__ : Union[str, Any]=7 , lowerCAmelCase__ : str=True , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : int=99 , lowerCAmelCase__ : str=32 , lowerCAmelCase__ : str=5 , lowerCAmelCase__ : str=4 , lowerCAmelCase__ : str=37 , lowerCAmelCase__ : int="gelu" , lowerCAmelCase__ : Optional[Any]=0.1 , lowerCAmelCase__ : int=0.1 , lowerCAmelCase__ : Optional[int]=512 , lowerCAmelCase__ : Dict=16 , lowerCAmelCase__ : List[Any]=2 , lowerCAmelCase__ : Any=0.02 , lowerCAmelCase__ : Union[str, Any]=4 , ) -> Dict: '''simple docstring''' _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = seq_length _UpperCamelCase = is_training _UpperCamelCase = use_attention_mask _UpperCamelCase = use_token_type_ids _UpperCamelCase = use_labels _UpperCamelCase = vocab_size _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = max_position_embeddings _UpperCamelCase = type_vocab_size _UpperCamelCase = type_sequence_label_size _UpperCamelCase = initializer_range _UpperCamelCase = num_choices def snake_case__ ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCamelCase = None if self.use_attention_mask: _UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCamelCase = None if self.use_token_type_ids: _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCamelCase = RobertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def snake_case__ ( self : Optional[int] ) -> Any: '''simple docstring''' _UpperCamelCase = self.prepare_config_and_inputs() _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs _UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict def snake_case__ ( self : List[str] ) -> Any: '''simple docstring''' _UpperCamelCase = self.prepare_config_and_inputs() _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs _UpperCamelCase = True _UpperCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" _snake_case : Optional[int] = True _snake_case : Optional[Any] = ( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def snake_case__ ( self : Dict ) -> List[Any]: '''simple docstring''' _UpperCamelCase = FlaxRobertaModelTester(self ) @slow def snake_case__ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' for model_class_name in self.all_model_classes: _UpperCamelCase = model_class_name.from_pretrained('''roberta-base''' , from_pt=lowerCAmelCase__ ) _UpperCamelCase = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCAmelCase__ )
287
1
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class a__ ( unittest.TestCase ): """simple docstring""" def _lowercase ( self : Optional[int] ) ->List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = [[1, 2, 4], [1, 2, 3, 4]] SCREAMING_SNAKE_CASE : Optional[Any] = DisjunctiveConstraint(UpperCAmelCase__ ) self.assertTrue(isinstance(dc.token_ids , UpperCAmelCase__ ) ) with self.assertRaises(UpperCAmelCase__ ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(UpperCAmelCase__ ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def _lowercase ( self : Optional[Any] ) ->Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(UpperCAmelCase__ ): DisjunctiveConstraint(UpperCAmelCase__ ) # fails here def _lowercase ( self : List[str] ) ->Tuple: """simple docstring""" SCREAMING_SNAKE_CASE : Dict = [[1, 2, 3], [1, 2, 4]] SCREAMING_SNAKE_CASE : int = DisjunctiveConstraint(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = dc.update(1 ) SCREAMING_SNAKE_CASE : str = stepped is True and completed is False and reset is False self.assertTrue(UpperCAmelCase__ ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = dc.update(2 ) SCREAMING_SNAKE_CASE : Optional[int] = stepped is True and completed is False and reset is False self.assertTrue(UpperCAmelCase__ ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = dc.update(3 ) SCREAMING_SNAKE_CASE : List[str] = stepped is True and completed is True and reset is False self.assertTrue(UpperCAmelCase__ ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def _lowercase ( self : Dict ) ->Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE : Any = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] SCREAMING_SNAKE_CASE : Dict = DisjunctiveConstraint(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
245
import inspect import unittest from transformers import YolosConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import YolosForObjectDetection, YolosModel from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class a__ : """simple docstring""" def __init__( self : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any]=1_3 , UpperCAmelCase__ : List[str]=[3_0, 3_0] , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : Dict=3 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : int=3_2 , UpperCAmelCase__ : List[str]=5 , UpperCAmelCase__ : Dict=4 , UpperCAmelCase__ : int=3_7 , UpperCAmelCase__ : List[str]="gelu" , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : str=1_0 , UpperCAmelCase__ : Dict=0.02 , UpperCAmelCase__ : Any=3 , UpperCAmelCase__ : int=None , UpperCAmelCase__ : List[Any]=8 , UpperCAmelCase__ : Dict=1_0 , ) ->Tuple: """simple docstring""" SCREAMING_SNAKE_CASE : str = parent SCREAMING_SNAKE_CASE : int = batch_size SCREAMING_SNAKE_CASE : str = image_size SCREAMING_SNAKE_CASE : List[Any] = patch_size SCREAMING_SNAKE_CASE : Any = num_channels SCREAMING_SNAKE_CASE : str = is_training SCREAMING_SNAKE_CASE : Dict = use_labels SCREAMING_SNAKE_CASE : List[Any] = hidden_size SCREAMING_SNAKE_CASE : Optional[int] = num_hidden_layers SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads SCREAMING_SNAKE_CASE : Dict = intermediate_size SCREAMING_SNAKE_CASE : int = hidden_act SCREAMING_SNAKE_CASE : str = hidden_dropout_prob SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : List[Any] = type_sequence_label_size SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range SCREAMING_SNAKE_CASE : str = num_labels SCREAMING_SNAKE_CASE : Dict = scope SCREAMING_SNAKE_CASE : Optional[Any] = n_targets SCREAMING_SNAKE_CASE : Dict = num_detection_tokens # we set the expected sequence length (which is used in several tests) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens SCREAMING_SNAKE_CASE : Tuple = (image_size[1] // patch_size) * (image_size[0] // patch_size) SCREAMING_SNAKE_CASE : int = num_patches + 1 + self.num_detection_tokens def _lowercase ( self : Tuple ) ->Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE : str = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] ) SCREAMING_SNAKE_CASE : int = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) SCREAMING_SNAKE_CASE : str = [] for i in range(self.batch_size ): SCREAMING_SNAKE_CASE : List[Any] = {} SCREAMING_SNAKE_CASE : Any = torch.randint( high=self.num_labels , size=(self.n_targets,) , device=UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : List[Any] = torch.rand(self.n_targets , 4 , device=UpperCAmelCase__ ) labels.append(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : Optional[Any] = self.get_config() return config, pixel_values, labels def _lowercase ( self : Dict ) ->Optional[Any]: """simple docstring""" return YolosConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , ) def _lowercase ( self : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] ) ->Tuple: """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = YolosModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() SCREAMING_SNAKE_CASE : Optional[Any] = model(UpperCAmelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) ) def _lowercase ( self : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] ) ->int: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = YolosForObjectDetection(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() SCREAMING_SNAKE_CASE : List[Any] = model(pixel_values=UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : Optional[Any] = model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) SCREAMING_SNAKE_CASE : int = model(pixel_values=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) def _lowercase ( self : Dict ) ->Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = config_and_inputs SCREAMING_SNAKE_CASE : List[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class a__ ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): """simple docstring""" UpperCAmelCase__ : Tuple =(YolosModel, YolosForObjectDetection) if is_torch_available() else () UpperCAmelCase__ : Any =( {"""feature-extraction""": YolosModel, """object-detection""": YolosForObjectDetection} if is_torch_available() else {} ) UpperCAmelCase__ : Tuple =False UpperCAmelCase__ : int =False UpperCAmelCase__ : Tuple =False UpperCAmelCase__ : Optional[Any] =False def _lowercase ( self : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any=False ) ->int: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) if return_labels: if model_class.__name__ == "YolosForObjectDetection": SCREAMING_SNAKE_CASE : List[str] = [] for i in range(self.model_tester.batch_size ): SCREAMING_SNAKE_CASE : Tuple = {} SCREAMING_SNAKE_CASE : Union[str, Any] = torch.ones( size=(self.model_tester.n_targets,) , device=UpperCAmelCase__ , dtype=torch.long ) SCREAMING_SNAKE_CASE : str = torch.ones( self.model_tester.n_targets , 4 , device=UpperCAmelCase__ , dtype=torch.float ) labels.append(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : str = labels return inputs_dict def _lowercase ( self : Dict ) ->Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = YolosModelTester(self ) SCREAMING_SNAKE_CASE : Optional[int] = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=3_7 ) def _lowercase ( self : Union[str, Any] ) ->List[str]: """simple docstring""" self.config_tester.run_common_tests() def _lowercase ( self : List[Any] ) ->int: """simple docstring""" pass def _lowercase ( self : Optional[int] ) ->Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : List[str] = model_class(UpperCAmelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) SCREAMING_SNAKE_CASE : List[str] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCAmelCase__ , nn.Linear ) ) def _lowercase ( self : List[Any] ) ->int: """simple docstring""" SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : Tuple = model_class(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE : Tuple = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE : str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCAmelCase__ ) def _lowercase ( self : Union[str, Any] ) ->Dict: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def _lowercase ( self : Optional[Any] ) ->Dict: """simple docstring""" SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE : Optional[Any] = True # in YOLOS, the seq_len is different SCREAMING_SNAKE_CASE : Any = self.model_tester.expected_seq_len for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : Optional[Any] = True SCREAMING_SNAKE_CASE : Union[str, Any] = False SCREAMING_SNAKE_CASE : int = True SCREAMING_SNAKE_CASE : Optional[Any] = model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE : Union[str, Any] = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) ) SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.attentions self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] SCREAMING_SNAKE_CASE : Tuple = True SCREAMING_SNAKE_CASE : Tuple = model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE : Dict = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) ) SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.attentions self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) SCREAMING_SNAKE_CASE : List[str] = len(UpperCAmelCase__ ) # Check attention is always last and order is fine SCREAMING_SNAKE_CASE : Optional[int] = True SCREAMING_SNAKE_CASE : Tuple = True SCREAMING_SNAKE_CASE : List[str] = model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE : List[Any] = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) ) SCREAMING_SNAKE_CASE : Optional[int] = 1 self.assertEqual(out_len + added_hidden_states , len(UpperCAmelCase__ ) ) SCREAMING_SNAKE_CASE : str = outputs.attentions self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def _lowercase ( self : Any ) ->str: """simple docstring""" def check_hidden_states_output(UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str ): SCREAMING_SNAKE_CASE : List[Any] = model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE : List[str] = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) ) SCREAMING_SNAKE_CASE : Dict = outputs.hidden_states SCREAMING_SNAKE_CASE : str = getattr( self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(UpperCAmelCase__ ) , UpperCAmelCase__ ) # YOLOS has a different seq_length SCREAMING_SNAKE_CASE : Tuple = self.model_tester.expected_seq_len self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : Any = True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE : Any = True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) def _lowercase ( self : Any ) ->Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_object_detection(*UpperCAmelCase__ ) @slow def _lowercase ( self : str ) ->List[Any]: """simple docstring""" for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE : str = YolosModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) def __lowercase ( ) -> List[Any]: SCREAMING_SNAKE_CASE : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class a__ ( unittest.TestCase ): """simple docstring""" @cached_property def _lowercase ( self : int ) ->Union[str, Any]: """simple docstring""" return AutoImageProcessor.from_pretrained("""hustvl/yolos-small""" ) if is_vision_available() else None @slow def _lowercase ( self : List[Any] ) ->Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE : int = YolosForObjectDetection.from_pretrained("""hustvl/yolos-small""" ).to(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : Tuple = self.default_image_processor SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img() SCREAMING_SNAKE_CASE : str = image_processor(images=UpperCAmelCase__ , return_tensors="""pt""" ).to(UpperCAmelCase__ ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE : Optional[Any] = model(inputs.pixel_values ) # verify outputs SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 1_0_0, 9_2) ) self.assertEqual(outputs.logits.shape , UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : Any = torch.tensor( [[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] , device=UpperCAmelCase__ , ) SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor( [[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] , device=UpperCAmelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , UpperCAmelCase__ , atol=1e-4 ) ) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , UpperCAmelCase__ , atol=1e-4 ) ) # verify postprocessing SCREAMING_SNAKE_CASE : int = image_processor.post_process_object_detection( UpperCAmelCase__ , threshold=0.3 , target_sizes=[image.size[::-1]] )[0] SCREAMING_SNAKE_CASE : str = torch.tensor([0.99_94, 0.97_90, 0.99_64, 0.99_72, 0.98_61] ).to(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : str = [7_5, 7_5, 1_7, 6_3, 1_7] SCREAMING_SNAKE_CASE : List[str] = torch.tensor([3_35.06_09, 79.38_48, 3_75.42_16, 1_87.24_95] ).to(UpperCAmelCase__ ) self.assertEqual(len(results["""scores"""] ) , 5 ) self.assertTrue(torch.allclose(results["""scores"""] , UpperCAmelCase__ , atol=1e-4 ) ) self.assertSequenceEqual(results["""labels"""].tolist() , UpperCAmelCase__ ) self.assertTrue(torch.allclose(results["""boxes"""][0, :] , UpperCAmelCase__ ) )
245
1
'''simple docstring''' import argparse from collections import defaultdict import yaml lowercase : List[Any] = 'docs/source/en/_toctree.yml' def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Tuple = defaultdict(snake_case__ ) for doc in model_doc: counts[doc["local"]] += 1 A : List[str] = [key for key, value in counts.items() if value > 1] A : Dict = [] for duplicate_key in duplicates: A : Union[str, Any] = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} ) if len(snake_case__ ) > 1: raise ValueError( F'{duplicate_key} is present several times in the documentation table of content at ' '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the ''' '''others.''' ) # Only add this once new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] ) # Sort return sorted(snake_case__ , key=lambda snake_case__ : s["title"].lower() ) def lowerCAmelCase_ ( snake_case__=False ): '''simple docstring''' with open(snake_case__ , encoding='''utf-8''' ) as f: A : int = yaml.safe_load(f.read() ) # Get to the API doc A : Optional[Any] = 0 while content[api_idx]["title"] != "API": api_idx += 1 A : Optional[Any] = content[api_idx]['''sections'''] # Then to the model doc A : Any = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 A : Optional[int] = api_doc[model_idx]['''sections'''] A : Optional[Any] = [(idx, section) for idx, section in enumerate(snake_case__ ) if '''sections''' in section] A : Optional[Any] = False for idx, modality_doc in modalities_docs: A : Tuple = modality_doc['''sections'''] A : Tuple = clean_model_doc_toc(snake_case__ ) if old_modality_doc != new_modality_doc: A : Union[str, Any] = True if overwrite: A : List[Any] = new_modality_doc if diff: if overwrite: A : int = model_doc A : Dict = api_doc with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f: f.write(yaml.dump(snake_case__ , allow_unicode=snake_case__ ) ) else: raise ValueError( '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' ) if __name__ == "__main__": lowercase : Dict = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') lowercase : List[str] = parser.parse_args() check_model_doc(args.fix_and_overwrite)
311
'''simple docstring''' # limitations under the License. from typing import Optional, Tuple, Union import torch from diffusers import DiffusionPipeline, ImagePipelineOutput class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" super().__init__() self.register_modules(unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE ) @torch.no_grad() def __call__( self , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 50 , SCREAMING_SNAKE_CASE = "pil" , SCREAMING_SNAKE_CASE = True , **SCREAMING_SNAKE_CASE , ) -> Union[ImagePipelineOutput, Tuple]: """simple docstring""" A : List[Any] = torch.randn( (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=SCREAMING_SNAKE_CASE , ) A : Optional[Any] = image.to(self.device ) # set step values self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output A : Tuple = self.unet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 A : List[Any] = self.scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample A : List[Any] = (image / 2 + 0.5).clamp(0 , 1 ) A : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": A : List[Any] = self.numpy_to_pil(SCREAMING_SNAKE_CASE ) if not return_dict: return (image,), "This is a local test" return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE ), "This is a local test"
311
1
'''simple docstring''' import math import os import sys def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): __a : str = '' try: with open(_SCREAMING_SNAKE_CASE , 'rb' ) as binary_file: __a : str = binary_file.read() for dat in data: __a : str = F"""{dat:08b}""" result += curr_byte return result except OSError: print('File not accessible' ) sys.exit() def lowerCamelCase (_SCREAMING_SNAKE_CASE : dict[str, str] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str ): lexicon.pop(_SCREAMING_SNAKE_CASE ) __a : Tuple = last_match_id if math.loga(_SCREAMING_SNAKE_CASE ).is_integer(): for curr_key in lexicon: __a : List[Any] = '0' + lexicon[curr_key] __a : Optional[Any] = bin(_SCREAMING_SNAKE_CASE )[2:] def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): __a : List[Any] = {'0': '0', '1': '1'} __a , __a : Union[str, Any] = '', '' __a : Dict = len(_SCREAMING_SNAKE_CASE ) for i in range(len(_SCREAMING_SNAKE_CASE ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue __a : Optional[int] = lexicon[curr_string] result += last_match_id add_key_to_lexicon(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) index += 1 __a : List[Any] = '' while curr_string != "" and curr_string not in lexicon: curr_string += "0" if curr_string != "": __a : List[Any] = lexicon[curr_string] result += last_match_id return result def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ): __a : Any = os.path.getsize(_SCREAMING_SNAKE_CASE ) __a : int = bin(_SCREAMING_SNAKE_CASE )[2:] __a : Dict = len(_SCREAMING_SNAKE_CASE ) return "0" * (length_length - 1) + file_length_binary + compressed def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ): __a : Any = 8 try: with open(_SCREAMING_SNAKE_CASE , 'wb' ) as opened_file: __a : Optional[int] = [ to_write[i : i + byte_length] for i in range(0 , len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append('10000000' ) else: result_byte_array[-1] += "1" + "0" * ( byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array: opened_file.write(int(_SCREAMING_SNAKE_CASE , 2 ).to_bytes(1 , byteorder='big' ) ) except OSError: print('File not accessible' ) sys.exit() def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ): __a : List[str] = read_file_binary(_SCREAMING_SNAKE_CASE ) __a : Tuple = compress_data(_SCREAMING_SNAKE_CASE ) __a : Union[str, Any] = add_file_length(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) write_file_binary(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
27
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class SCREAMING_SNAKE_CASE( A__ , A__ , A__ , unittest.TestCase ): """simple docstring""" lowerCamelCase__ = AltDiffusionPipeline lowerCamelCase__ = TEXT_TO_IMAGE_PARAMS lowerCamelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS lowerCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS lowerCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS def A ( self : Dict ) -> int: torch.manual_seed(0 ) UpperCAmelCase : str = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) UpperCAmelCase : Dict = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=__snake_case , set_alpha_to_one=__snake_case , ) torch.manual_seed(0 ) UpperCAmelCase : Union[str, Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) # TODO: address the non-deterministic text encoder (fails for save-load tests) # torch.manual_seed(0) # text_encoder_config = RobertaSeriesConfig( # hidden_size=32, # project_dim=32, # intermediate_size=37, # layer_norm_eps=1e-05, # num_attention_heads=4, # num_hidden_layers=5, # vocab_size=5002, # ) # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config) torch.manual_seed(0 ) UpperCAmelCase : Dict = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5002 , ) UpperCAmelCase : List[Any] = CLIPTextModel(__snake_case ) UpperCAmelCase : Optional[Any] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' ) UpperCAmelCase : Optional[int] = 77 UpperCAmelCase : Optional[int] = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def A ( self : Optional[Any] , __snake_case : Dict , __snake_case : List[str]=0 ) -> Union[str, Any]: if str(__snake_case ).startswith('''mps''' ): UpperCAmelCase : str = torch.manual_seed(__snake_case ) else: UpperCAmelCase : Tuple = torch.Generator(device=__snake_case ).manual_seed(__snake_case ) UpperCAmelCase : Dict = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def A ( self : Union[str, Any] ) -> List[str]: super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 ) def A ( self : Tuple ) -> List[str]: super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def A ( self : Tuple ) -> Optional[int]: UpperCAmelCase : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator UpperCAmelCase : Any = self.get_dummy_components() torch.manual_seed(0 ) UpperCAmelCase : Optional[int] = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , ) # TODO: remove after fixing the non-deterministic text encoder UpperCAmelCase : List[str] = RobertaSeriesModelWithTransformation(__snake_case ) UpperCAmelCase : str = text_encoder UpperCAmelCase : Optional[int] = AltDiffusionPipeline(**__snake_case ) UpperCAmelCase : str = alt_pipe.to(__snake_case ) alt_pipe.set_progress_bar_config(disable=__snake_case ) UpperCAmelCase : Optional[int] = self.get_dummy_inputs(__snake_case ) UpperCAmelCase : Optional[int] = '''A photo of an astronaut''' UpperCAmelCase : List[Any] = alt_pipe(**__snake_case ) UpperCAmelCase : Optional[Any] = output.images UpperCAmelCase : str = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase : List[str] = np.array( [0.5_74_81_62, 0.60_44_71_45, 0.48_82_12_17, 0.50_10_06_36, 0.5_43_11_85, 0.45_76_36_83, 0.49_65_76_96, 0.48_13_27_33, 0.47_57_30_93] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def A ( self : int ) -> Union[str, Any]: UpperCAmelCase : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator UpperCAmelCase : int = self.get_dummy_components() UpperCAmelCase : int = PNDMScheduler(skip_prk_steps=__snake_case ) torch.manual_seed(0 ) UpperCAmelCase : int = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , ) # TODO: remove after fixing the non-deterministic text encoder UpperCAmelCase : Union[str, Any] = RobertaSeriesModelWithTransformation(__snake_case ) UpperCAmelCase : Union[str, Any] = text_encoder UpperCAmelCase : Optional[int] = AltDiffusionPipeline(**__snake_case ) UpperCAmelCase : Dict = alt_pipe.to(__snake_case ) alt_pipe.set_progress_bar_config(disable=__snake_case ) UpperCAmelCase : int = self.get_dummy_inputs(__snake_case ) UpperCAmelCase : Optional[int] = alt_pipe(**__snake_case ) UpperCAmelCase : Optional[int] = output.images UpperCAmelCase : int = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase : Optional[int] = np.array( [0.51_60_50_93, 0.5_70_72_41, 0.47_36_55_07, 0.50_57_88_86, 0.5_63_38_77, 0.4_64_25_03, 0.5_18_20_81, 0.48_76_34_84, 0.49_08_42_37] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class SCREAMING_SNAKE_CASE( unittest.TestCase ): """simple docstring""" def A ( self : str ) -> Any: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def A ( self : List[Any] ) -> Any: # make sure here that pndm scheduler skips prk UpperCAmelCase : List[Any] = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=__snake_case ) UpperCAmelCase : Tuple = alt_pipe.to(__snake_case ) alt_pipe.set_progress_bar_config(disable=__snake_case ) UpperCAmelCase : List[Any] = '''A painting of a squirrel eating a burger''' UpperCAmelCase : Any = torch.manual_seed(0 ) UpperCAmelCase : Optional[int] = alt_pipe([prompt] , generator=__snake_case , guidance_scale=6.0 , num_inference_steps=20 , output_type='''np''' ) UpperCAmelCase : Dict = output.images UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) UpperCAmelCase : List[str] = np.array([0.10_10, 0.08_00, 0.07_94, 0.08_85, 0.08_43, 0.07_62, 0.07_69, 0.07_29, 0.05_86] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def A ( self : Tuple ) -> int: UpperCAmelCase : int = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' ) UpperCAmelCase : Tuple = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=__snake_case , safety_checker=__snake_case ) UpperCAmelCase : Dict = alt_pipe.to(__snake_case ) alt_pipe.set_progress_bar_config(disable=__snake_case ) UpperCAmelCase : Tuple = '''A painting of a squirrel eating a burger''' UpperCAmelCase : Optional[int] = torch.manual_seed(0 ) UpperCAmelCase : List[Any] = alt_pipe([prompt] , generator=__snake_case , num_inference_steps=2 , output_type='''numpy''' ) UpperCAmelCase : Dict = output.images UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) UpperCAmelCase : Union[str, Any] = np.array([0.40_19, 0.40_52, 0.38_10, 0.41_19, 0.39_16, 0.39_82, 0.46_51, 0.41_95, 0.53_23] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
23
0
'''simple docstring''' from __future__ import annotations def _lowerCamelCase ( lowercase : list[int] , lowercase : list[int] , lowercase : int ) -> tuple[float, list[float]]: _a = list(range(len(lowercase ) ) ) _a = [v / w for v, w in zip(lowercase , lowercase )] index.sort(key=lambda lowercase : ratio[i] , reverse=lowercase ) _a = 0 _a = [0] * len(lowercase ) for i in index: if weight[i] <= capacity: _a = 1 max_value += value[i] capacity -= weight[i] else: _a = capacity / weight[i] max_value += value[i] * capacity / weight[i] break return max_value, fractions if __name__ == "__main__": import doctest doctest.testmod()
369
'''simple docstring''' import torch import torch.nn as nn from transformers.modeling_utils import ModuleUtilsMixin from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" @register_to_config def __init__( self : List[Any] , __a : int , __a : int , __a : int , __a : float , __a : int , __a : int , __a : int , __a : int , __a : str , __a : bool = False , ): super().__init__() _a = nn.Embedding(__a , __a ) _a = nn.Embedding(__a , __a ) _a = False _a = nn.Dropout(p=__a ) _a = TaConfig( vocab_size=__a , d_model=__a , num_heads=__a , d_kv=__a , d_ff=__a , dropout_rate=__a , feed_forward_proj=__a , is_decoder=__a , is_encoder_decoder=__a , ) _a = nn.ModuleList() for lyr_num in range(__a ): _a = TaBlock(__a ) self.encoders.append(__a ) _a = TaLayerNorm(__a ) _a = nn.Dropout(p=__a ) def UpperCamelCase__ ( self : str , __a : Union[str, Any] , __a : Dict ): _a = self.token_embedder(__a ) _a = encoder_input_tokens.shape[1] _a = torch.arange(__a , device=encoder_input_tokens.device ) x += self.position_encoding(__a ) _a = self.dropout_pre(__a ) # inverted the attention mask _a = encoder_input_tokens.size() _a = self.get_extended_attention_mask(__a , __a ) for lyr in self.encoders: _a = lyr(__a , __a )[0] _a = self.layer_norm(__a ) return self.dropout_post(__a ), encoder_inputs_mask
346
0
'''simple docstring''' from argparse import ArgumentParser from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand __SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name def UpperCamelCase_ ( _UpperCAmelCase : Any ) -> str: """simple docstring""" if not path: return "pipe" for ext in PipelineDataFormat.SUPPORTED_FORMATS: if path.endswith(snake_case__ ): return ext raise Exception( F"""Unable to determine file format from file extension {path}. """ F"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" ) def UpperCamelCase_ ( _UpperCAmelCase : Union[str, Any] ) -> str: """simple docstring""" _UpperCAmelCase : List[Any] = pipeline( task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , ) _UpperCAmelCase : List[str] = try_infer_format_from_ext(args.input ) if args.format == "infer" else args.format _UpperCAmelCase : Optional[int] = PipelineDataFormat.from_str( format=snake_case__ , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , ) return RunCommand(snake_case__ , snake_case__ ) class lowerCamelCase_ (A__ ): '''simple docstring''' def __init__( self : Dict , A : Pipeline , A : PipelineDataFormat ): _UpperCAmelCase : str = nlp _UpperCAmelCase : Tuple = reader @staticmethod def _A ( A : ArgumentParser ): _UpperCAmelCase : Optional[Any] = parser.add_parser("run" , help="Run a pipeline through the CLI" ) run_parser.add_argument("--task" , choices=get_supported_tasks() , help="Task to run" ) run_parser.add_argument("--input" , type=__UpperCAmelCase , help="Path to the file to use for inference" ) run_parser.add_argument("--output" , type=__UpperCAmelCase , help="Path to the file that will be used post to write results." ) run_parser.add_argument("--model" , type=__UpperCAmelCase , help="Name or path to the model to instantiate." ) run_parser.add_argument("--config" , type=__UpperCAmelCase , help="Name or path to the model's config to instantiate." ) run_parser.add_argument( "--tokenizer" , type=__UpperCAmelCase , help="Name of the tokenizer to use. (default: same as the model name)" ) run_parser.add_argument( "--column" , type=__UpperCAmelCase , help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)" , ) run_parser.add_argument( "--format" , type=__UpperCAmelCase , default="infer" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="Input format to read from" , ) run_parser.add_argument( "--device" , type=__UpperCAmelCase , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , ) run_parser.add_argument("--overwrite" , action="store_true" , help="Allow overwriting the output file." ) run_parser.set_defaults(func=__UpperCAmelCase ) def _A ( self : List[Any] ): _UpperCAmelCase , _UpperCAmelCase : Any = self._nlp, [] for entry in self._reader: _UpperCAmelCase : Dict = nlp(**__UpperCAmelCase ) if self._reader.is_multi_columns else nlp(__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ): outputs.append(__UpperCAmelCase ) else: outputs += output # Saving data if self._nlp.binary_output: _UpperCAmelCase : Any = self._reader.save_binary(__UpperCAmelCase ) logger.warning(F"""Current pipeline requires output to be in binary format, saving at {binary_path}""" ) else: self._reader.save(__UpperCAmelCase )
31
"""simple docstring""" import argparse import collections import numpy as np import torch from flax import traverse_util from tax import checkpoints from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def A ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' return params[f"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :] def A ( snake_case__ , snake_case__ , snake_case__ , snake_case__="attention" ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = SCREAMING_SNAKE_CASE__ = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :] ) SCREAMING_SNAKE_CASE__ = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] ) SCREAMING_SNAKE_CASE__ = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :] ) SCREAMING_SNAKE_CASE__ = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] ) SCREAMING_SNAKE_CASE__ = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :] ) SCREAMING_SNAKE_CASE__ = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] ) SCREAMING_SNAKE_CASE__ = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :] ) SCREAMING_SNAKE_CASE__ = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] ) return k, o, q, v def A ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' if split_mlp_wi: SCREAMING_SNAKE_CASE__ = params[f"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :] SCREAMING_SNAKE_CASE__ = params[f"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :] SCREAMING_SNAKE_CASE__ = (wi_a, wi_a) else: SCREAMING_SNAKE_CASE__ = params[f"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :] SCREAMING_SNAKE_CASE__ = params[f"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :] return wi, wo def A ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' return params[f"""{prefix}/{prefix}/{layer_name}/scale"""][:, i] def A ( snake_case__ , *, snake_case__ , snake_case__ , snake_case__ = False ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = traverse_util.flatten_dict(variables["""target"""] ) SCREAMING_SNAKE_CASE__ = {"""/""".join(snake_case__ ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi SCREAMING_SNAKE_CASE__ = """encoder/encoder/mlp/wi_0/kernel""" in old print("""Split MLP:""" , snake_case__ ) SCREAMING_SNAKE_CASE__ = collections.OrderedDict() # Shared embeddings. SCREAMING_SNAKE_CASE__ = old["""token_embedder/embedding"""] # Encoder. for i in range(snake_case__ ): # Block i, layer 0 (Self Attention). SCREAMING_SNAKE_CASE__ = tax_layer_norm_lookup(snake_case__ , snake_case__ , """encoder""" , """pre_attention_layer_norm""" ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = tax_attention_lookup(snake_case__ , snake_case__ , """encoder""" , """attention""" ) SCREAMING_SNAKE_CASE__ = layer_norm SCREAMING_SNAKE_CASE__ = k.T SCREAMING_SNAKE_CASE__ = o.T SCREAMING_SNAKE_CASE__ = q.T SCREAMING_SNAKE_CASE__ = v.T # Block i, layer 1 (MLP). SCREAMING_SNAKE_CASE__ = tax_layer_norm_lookup(snake_case__ , snake_case__ , """encoder""" , """pre_mlp_layer_norm""" ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = tax_mlp_lookup(snake_case__ , snake_case__ , """encoder""" , snake_case__ ) SCREAMING_SNAKE_CASE__ = layer_norm if split_mlp_wi: SCREAMING_SNAKE_CASE__ = wi[0].T SCREAMING_SNAKE_CASE__ = wi[1].T else: SCREAMING_SNAKE_CASE__ = wi.T SCREAMING_SNAKE_CASE__ = wo.T if scalable_attention: # convert the rel_embedding of each layer SCREAMING_SNAKE_CASE__ = tax_relpos_bias_lookup( snake_case__ , snake_case__ , """encoder""" ).T SCREAMING_SNAKE_CASE__ = old["""encoder/encoder_norm/scale"""] if not scalable_attention: SCREAMING_SNAKE_CASE__ = tax_relpos_bias_lookup( snake_case__ , 0 , """encoder""" ).T SCREAMING_SNAKE_CASE__ = tax_relpos_bias_lookup( snake_case__ , 0 , """decoder""" ).T if not is_encoder_only: # Decoder. for i in range(snake_case__ ): # Block i, layer 0 (Self Attention). SCREAMING_SNAKE_CASE__ = tax_layer_norm_lookup(snake_case__ , snake_case__ , """decoder""" , """pre_self_attention_layer_norm""" ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = tax_attention_lookup(snake_case__ , snake_case__ , """decoder""" , """self_attention""" ) SCREAMING_SNAKE_CASE__ = layer_norm SCREAMING_SNAKE_CASE__ = k.T SCREAMING_SNAKE_CASE__ = o.T SCREAMING_SNAKE_CASE__ = q.T SCREAMING_SNAKE_CASE__ = v.T # Block i, layer 1 (Cross Attention). SCREAMING_SNAKE_CASE__ = tax_layer_norm_lookup(snake_case__ , snake_case__ , """decoder""" , """pre_cross_attention_layer_norm""" ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = tax_attention_lookup(snake_case__ , snake_case__ , """decoder""" , """encoder_decoder_attention""" ) SCREAMING_SNAKE_CASE__ = layer_norm SCREAMING_SNAKE_CASE__ = k.T SCREAMING_SNAKE_CASE__ = o.T SCREAMING_SNAKE_CASE__ = q.T SCREAMING_SNAKE_CASE__ = v.T # Block i, layer 2 (MLP). SCREAMING_SNAKE_CASE__ = tax_layer_norm_lookup(snake_case__ , snake_case__ , """decoder""" , """pre_mlp_layer_norm""" ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = tax_mlp_lookup(snake_case__ , snake_case__ , """decoder""" , snake_case__ ) SCREAMING_SNAKE_CASE__ = layer_norm if split_mlp_wi: SCREAMING_SNAKE_CASE__ = wi[0].T SCREAMING_SNAKE_CASE__ = wi[1].T else: SCREAMING_SNAKE_CASE__ = wi.T SCREAMING_SNAKE_CASE__ = wo.T if scalable_attention: # convert the rel_embedding of each layer SCREAMING_SNAKE_CASE__ = tax_relpos_bias_lookup(snake_case__ , snake_case__ , """decoder""" ).T SCREAMING_SNAKE_CASE__ = old["""decoder/decoder_norm/scale"""] # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: SCREAMING_SNAKE_CASE__ = old["""decoder/logits_dense/kernel"""].T return new def A ( snake_case__ , snake_case__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: SCREAMING_SNAKE_CASE__ = state_dict["""shared.weight"""] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: SCREAMING_SNAKE_CASE__ = state_dict["""shared.weight"""] if "lm_head.weight" not in state_dict: # For old 1.0 models. print("""Using shared word embeddings as lm_head.""" ) SCREAMING_SNAKE_CASE__ = state_dict["""shared.weight"""] return state_dict def A ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = checkpoints.load_tax_checkpoint(snake_case__ ) SCREAMING_SNAKE_CASE__ = convert_tax_to_pytorch( snake_case__ , num_layers=config.num_layers , is_encoder_only=snake_case__ , scalable_attention=snake_case__ ) SCREAMING_SNAKE_CASE__ = make_state_dict(snake_case__ , snake_case__ ) model.load_state_dict(snake_case__ , strict=snake_case__ ) def A ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = False , snake_case__ = False , ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = MTaConfig.from_json_file(snake_case__ ) print(f"""Building PyTorch model from configuration: {config}""" ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: SCREAMING_SNAKE_CASE__ = UMTaEncoderModel(snake_case__ ) else: SCREAMING_SNAKE_CASE__ = UMTaForConditionalGeneration(snake_case__ ) # Load weights from tf checkpoint load_tax_weights_in_ta(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(snake_case__ ) # Verify that we can load the checkpoint. model.from_pretrained(snake_case__ ) print("""Done""" ) if __name__ == "__main__": A_ : Any = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.") # Required parameters parser.add_argument( "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False ) parser.add_argument( "--scalable_attention", action="store_true", help="Whether the model uses scaled attention (umt5 model)", default=False, ) A_ : int = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only, args.scalable_attention, )
165
0
import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" snake_case_ = MgpstrTokenizer snake_case_ = False snake_case_ = {} snake_case_ = False def lowercase_ ( self ) -> List[Any]: '''simple docstring''' super().setUp() # fmt: off __lowerCamelCase = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'] # fmt: on __lowerCamelCase = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) ) __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(lowerCamelCase__ ) + '\n' ) def lowercase_ ( self , **lowerCamelCase__ ) -> str: '''simple docstring''' return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ ) def lowercase_ ( self , lowerCamelCase__ ) -> int: '''simple docstring''' __lowerCamelCase = 'tester' __lowerCamelCase = 'tester' return input_text, output_text @unittest.skip('MGP-STR always lower cases letters.' ) def lowercase_ ( self ) -> int: '''simple docstring''' pass def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase = self.get_tokenizers(do_lower_case=lowerCamelCase__ ) for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): __lowerCamelCase = '[SPECIAL_TOKEN]' tokenizer.add_special_tokens({'cls_token': special_token} ) __lowerCamelCase = tokenizer.encode([special_token] , add_special_tokens=lowerCamelCase__ ) self.assertEqual(len(lowerCamelCase__ ) , 1 ) __lowerCamelCase = tokenizer.decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ ) self.assertTrue(special_token not in decoded ) def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): __lowerCamelCase , __lowerCamelCase = self.get_input_output_texts(lowerCamelCase__ ) __lowerCamelCase = tokenizer.tokenize(lowerCamelCase__ ) __lowerCamelCase = tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) __lowerCamelCase = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = tokenizer.convert_ids_to_tokens(lowerCamelCase__ ) self.assertNotEqual(len(lowerCamelCase__ ) , 0 ) __lowerCamelCase = tokenizer.decode(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) self.assertEqual(text_a.replace(' ' , '' ) , lowerCamelCase__ ) @unittest.skip('MGP-STR tokenizer only handles one sequence.' ) def lowercase_ ( self ) -> str: '''simple docstring''' pass @unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' ) def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' pass
348
from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal __A = logging.get_logger(__name__) __A = TypeVar("DatasetType", Dataset, IterableDataset) def lowerCamelCase_ ( UpperCamelCase__ : List[DatasetType] , UpperCamelCase__ : Optional[List[float]] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[DatasetInfo] = None , UpperCamelCase__ : Optional[NamedSplit] = None , UpperCamelCase__ : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType: """simple docstring""" from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError('Unable to interleave an empty list of datasets.' ) for i, dataset in enumerate(UpperCamelCase__ ): if not isinstance(UpperCamelCase__ , (Dataset, IterableDataset) ): if isinstance(UpperCamelCase__ , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """ 'is an empty dataset dictionary.' ) raise ValueError( F"""Dataset at position {i} has at least one split: {list(UpperCamelCase__ )}\n""" F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(UpperCamelCase__ ) )}']""" ) raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(UpperCamelCase__ ).__name__}.""" ) if i == 0: __lowerCamelCase , __lowerCamelCase = ( (Dataset, IterableDataset) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else (IterableDataset, Dataset) ) elif not isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError( F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" ) if dataset_type is Dataset: return _interleave_map_style_datasets( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , stopping_strategy=UpperCamelCase__ ) else: return _interleave_iterable_datasets( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , stopping_strategy=UpperCamelCase__ ) def lowerCamelCase_ ( UpperCamelCase__ : List[DatasetType] , UpperCamelCase__ : Optional[DatasetInfo] = None , UpperCamelCase__ : Optional[NamedSplit] = None , UpperCamelCase__ : int = 0 , ) -> DatasetType: """simple docstring""" if not dsets: raise ValueError('Unable to concatenate an empty list of datasets.' ) for i, dataset in enumerate(UpperCamelCase__ ): if not isinstance(UpperCamelCase__ , (Dataset, IterableDataset) ): if isinstance(UpperCamelCase__ , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """ 'is an empty dataset dictionary.' ) raise ValueError( F"""Dataset at position {i} has at least one split: {list(UpperCamelCase__ )}\n""" F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(UpperCamelCase__ ) )}']""" ) raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(UpperCamelCase__ ).__name__}.""" ) if i == 0: __lowerCamelCase , __lowerCamelCase = ( (Dataset, IterableDataset) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else (IterableDataset, Dataset) ) elif not isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError( F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" ) if dataset_type is Dataset: return _concatenate_map_style_datasets(UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , axis=UpperCamelCase__ ) else: return _concatenate_iterable_datasets(UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , axis=UpperCamelCase__ )
348
1
"""simple docstring""" import baseaa def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> bytes: return baseaa.baaencode(string.encode('''utf-8''' ) ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> str: return baseaa.baadecode(__UpperCAmelCase ).decode('''utf-8''' ) if __name__ == "__main__": __A = "Hello World!" __A = baseaa_encode(test) print(encoded) __A = baseaa_decode(encoded) print(decoded)
177
"""simple docstring""" import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionTextToImagePipeline from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device __A = False class UpperCAmelCase (unittest.TestCase ): """simple docstring""" pass @nightly @require_torch_gpu class UpperCAmelCase (unittest.TestCase ): """simple docstring""" def _snake_case ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self ): lowercase__: Dict = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''' ) # remove text_unet pipe.remove_unused_weights() pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) lowercase__: List[str] = '''A painting of a squirrel eating a burger ''' lowercase__: str = torch.manual_seed(0 ) lowercase__: Union[str, Any] = pipe( prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(_UpperCAmelCase ) lowercase__: Optional[Any] = VersatileDiffusionTextToImagePipeline.from_pretrained(_UpperCAmelCase ) pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) lowercase__: Optional[int] = generator.manual_seed(0 ) lowercase__: List[str] = pipe( prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass" def _snake_case ( self ): lowercase__: Dict = VersatileDiffusionTextToImagePipeline.from_pretrained( '''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) lowercase__: Tuple = '''A painting of a squirrel eating a burger ''' lowercase__: Optional[Any] = torch.manual_seed(0 ) lowercase__: Tuple = pipe( prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images lowercase__: Union[str, Any] = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) lowercase__: Any = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
177
1
from ...configuration_utils import PretrainedConfig class __snake_case ( lowerCAmelCase ): _a : List[Any]= "bert-generation" def __init__( self ,snake_case=50358 ,snake_case=1024 ,snake_case=24 ,snake_case=16 ,snake_case=4096 ,snake_case="gelu" ,snake_case=0.1 ,snake_case=0.1 ,snake_case=512 ,snake_case=0.02 ,snake_case=1e-12 ,snake_case=0 ,snake_case=2 ,snake_case=1 ,snake_case="absolute" ,snake_case=True ,**snake_case ,): '''simple docstring''' super().__init__(pad_token_id=lowerCamelCase_ ,bos_token_id=lowerCamelCase_ ,eos_token_id=lowerCamelCase_ ,**lowerCamelCase_ ) lowercase : Union[str, Any] = vocab_size lowercase : Optional[int] = hidden_size lowercase : Dict = num_hidden_layers lowercase : str = num_attention_heads lowercase : List[Any] = hidden_act lowercase : Union[str, Any] = intermediate_size lowercase : int = hidden_dropout_prob lowercase : int = attention_probs_dropout_prob lowercase : Optional[Any] = max_position_embeddings lowercase : Tuple = initializer_range lowercase : Optional[Any] = layer_norm_eps lowercase : List[str] = position_embedding_type lowercase : List[str] = use_cache
361
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]: global f # a global dp table for knapsack if f[i][j] < 0: if j < wt[i - 1]: lowercase : str = mf_knapsack(i - 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else: lowercase : Optional[int] = max( mf_knapsack(i - 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , mf_knapsack(i - 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , j - wt[i - 1] ) + val[i - 1] , ) lowercase : List[Any] = val return f[i][j] def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Any: lowercase : Optional[int] = [[0] * (w + 1) for _ in range(n + 1 )] for i in range(1 , n + 1 ): for w_ in range(1 , w + 1 ): if wt[i - 1] <= w_: lowercase : Union[str, Any] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] ) else: lowercase : Any = dp[i - 1][w_] return dp[n][w_], dp def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[Any]: if not (isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) ) and isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) )): raise ValueError( """Both the weights and values vectors must be either lists or tuples""" ) lowercase : Optional[Any] = len(SCREAMING_SNAKE_CASE__ ) if num_items != len(SCREAMING_SNAKE_CASE__ ): lowercase : Optional[Any] = ( """The number of weights must be the same as the number of values.\n""" f"But got {num_items} weights and {len(SCREAMING_SNAKE_CASE__ )} values" ) raise ValueError(SCREAMING_SNAKE_CASE__ ) for i in range(SCREAMING_SNAKE_CASE__ ): if not isinstance(wt[i] , SCREAMING_SNAKE_CASE__ ): lowercase : Tuple = ( """All weights must be integers but got weight of """ f"type {type(wt[i] )} at index {i}" ) raise TypeError(SCREAMING_SNAKE_CASE__ ) lowercase , lowercase : Any = knapsack(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase : set = set() _construct_solution(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return optimal_val, example_optional_set def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[int]: # for the current item i at a maximum weight j to be part of an optimal subset, # the optimal value at (i, j) must be greater than the optimal value at (i-1, j). # where i - 1 means considering only the previous items at the given maximum weight if i > 0 and j > 0: if dp[i - 1][j] == dp[i][j]: _construct_solution(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , i - 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else: optimal_set.add(SCREAMING_SNAKE_CASE__ ) _construct_solution(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , i - 1 , j - wt[i - 1] , SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": lowercase : Dict = [3, 2, 4, 4] lowercase : List[Any] = [4, 3, 2, 3] lowercase : Tuple = 4 lowercase : Tuple = 6 lowercase : Tuple = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)] lowercase , lowercase : List[str] = knapsack(w, wt, val, n) print(optimal_solution) print(mf_knapsack(n, wt, val, w)) # switched the n and w # testing the dynamic programming problem with example # the optimal subset for the above example are items 3 and 4 lowercase , lowercase : Optional[int] = knapsack_with_example_solution(w, wt, val) assert optimal_solution == 8 assert optimal_subset == {3, 4} print("""optimal_value = """, optimal_solution) print("""An optimal subset corresponding to the optimal value""", optimal_subset)
285
0
from ..utils import is_flax_available, is_torch_available if is_torch_available(): from .autoencoder_kl import AutoencoderKL from .controlnet import ControlNetModel from .dual_transformer_ad import DualTransformeraDModel from .modeling_utils import ModelMixin from .prior_transformer import PriorTransformer from .ta_film_transformer import TaFilmDecoder from .transformer_ad import TransformeraDModel from .unet_ad import UNetaDModel from .unet_ad import UNetaDModel from .unet_ad_condition import UNetaDConditionModel from .unet_ad_condition import UNetaDConditionModel from .vq_model import VQModel if is_flax_available(): from .controlnet_flax import FlaxControlNetModel from .unet_ad_condition_flax import FlaxUNetaDConditionModel from .vae_flax import FlaxAutoencoderKL
8
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() A_ : Optional[Any] = logging.get_logger(__name__) def snake_case_ ( lowerCAmelCase_ )-> Optional[Any]: '''simple docstring''' _UpperCAmelCase : Dict = DPTConfig() if "large" in checkpoint_url: _UpperCAmelCase : List[Any] = 1024 _UpperCAmelCase : Optional[int] = 4096 _UpperCAmelCase : Tuple = 24 _UpperCAmelCase : List[str] = 16 _UpperCAmelCase : str = [5, 11, 17, 23] _UpperCAmelCase : Tuple = [256, 512, 1024, 1024] _UpperCAmelCase : List[str] = (1, 384, 384) if "ade" in checkpoint_url: _UpperCAmelCase : Optional[int] = True _UpperCAmelCase : Tuple = 150 _UpperCAmelCase : Tuple = """huggingface/label-files""" _UpperCAmelCase : int = """ade20k-id2label.json""" _UpperCAmelCase : List[str] = json.load(open(cached_download(hf_hub_url(lowerCAmelCase_ , lowerCAmelCase_ , repo_type="""dataset""" ) ) , """r""" ) ) _UpperCAmelCase : List[Any] = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()} _UpperCAmelCase : Tuple = idalabel _UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()} _UpperCAmelCase : Optional[int] = [1, 150, 480, 480] return config, expected_shape def snake_case_ ( lowerCAmelCase_ )-> str: '''simple docstring''' _UpperCAmelCase : Tuple = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""] for k in ignore_keys: state_dict.pop(lowerCAmelCase_ , lowerCAmelCase_ ) def snake_case_ ( lowerCAmelCase_ )-> Any: '''simple docstring''' if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): _UpperCAmelCase : int = name.replace("""pretrained.model""" , """dpt.encoder""" ) if "pretrained.model" in name: _UpperCAmelCase : str = name.replace("""pretrained.model""" , """dpt.embeddings""" ) if "patch_embed" in name: _UpperCAmelCase : Optional[Any] = name.replace("""patch_embed""" , """patch_embeddings""" ) if "pos_embed" in name: _UpperCAmelCase : int = name.replace("""pos_embed""" , """position_embeddings""" ) if "attn.proj" in name: _UpperCAmelCase : Dict = name.replace("""attn.proj""" , """attention.output.dense""" ) if "proj" in name and "project" not in name: _UpperCAmelCase : List[str] = name.replace("""proj""" , """projection""" ) if "blocks" in name: _UpperCAmelCase : Dict = name.replace("""blocks""" , """layer""" ) if "mlp.fc1" in name: _UpperCAmelCase : Optional[Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: _UpperCAmelCase : Dict = name.replace("""mlp.fc2""" , """output.dense""" ) if "norm1" in name: _UpperCAmelCase : List[str] = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: _UpperCAmelCase : Dict = name.replace("""norm2""" , """layernorm_after""" ) if "scratch.output_conv" in name: _UpperCAmelCase : Dict = name.replace("""scratch.output_conv""" , """head""" ) if "scratch" in name: _UpperCAmelCase : Optional[Any] = name.replace("""scratch""" , """neck""" ) if "layer1_rn" in name: _UpperCAmelCase : List[Any] = name.replace("""layer1_rn""" , """convs.0""" ) if "layer2_rn" in name: _UpperCAmelCase : Optional[int] = name.replace("""layer2_rn""" , """convs.1""" ) if "layer3_rn" in name: _UpperCAmelCase : List[str] = name.replace("""layer3_rn""" , """convs.2""" ) if "layer4_rn" in name: _UpperCAmelCase : str = name.replace("""layer4_rn""" , """convs.3""" ) if "refinenet" in name: _UpperCAmelCase : Union[str, Any] = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 _UpperCAmelCase : Tuple = name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4 )}''' ) if "out_conv" in name: _UpperCAmelCase : List[Any] = name.replace("""out_conv""" , """projection""" ) if "resConfUnit1" in name: _UpperCAmelCase : Tuple = name.replace("""resConfUnit1""" , """residual_layer1""" ) if "resConfUnit2" in name: _UpperCAmelCase : Union[str, Any] = name.replace("""resConfUnit2""" , """residual_layer2""" ) if "conv1" in name: _UpperCAmelCase : int = name.replace("""conv1""" , """convolution1""" ) if "conv2" in name: _UpperCAmelCase : List[str] = name.replace("""conv2""" , """convolution2""" ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: _UpperCAmelCase : Optional[Any] = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" ) if "pretrained.act_postprocess2.0.project.0" in name: _UpperCAmelCase : List[str] = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" ) if "pretrained.act_postprocess3.0.project.0" in name: _UpperCAmelCase : Tuple = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" ) if "pretrained.act_postprocess4.0.project.0" in name: _UpperCAmelCase : List[Any] = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" ) # resize blocks if "pretrained.act_postprocess1.3" in name: _UpperCAmelCase : List[str] = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" ) if "pretrained.act_postprocess1.4" in name: _UpperCAmelCase : Optional[int] = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" ) if "pretrained.act_postprocess2.3" in name: _UpperCAmelCase : int = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" ) if "pretrained.act_postprocess2.4" in name: _UpperCAmelCase : Optional[Any] = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" ) if "pretrained.act_postprocess3.3" in name: _UpperCAmelCase : str = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" ) if "pretrained.act_postprocess4.3" in name: _UpperCAmelCase : Optional[Any] = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" ) if "pretrained.act_postprocess4.4" in name: _UpperCAmelCase : Optional[Any] = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" ) if "pretrained" in name: _UpperCAmelCase : Any = name.replace("""pretrained""" , """dpt""" ) if "bn" in name: _UpperCAmelCase : Tuple = name.replace("""bn""" , """batch_norm""" ) if "head" in name: _UpperCAmelCase : Dict = name.replace("""head""" , """head.head""" ) if "encoder.norm" in name: _UpperCAmelCase : List[Any] = name.replace("""encoder.norm""" , """layernorm""" ) if "auxlayer" in name: _UpperCAmelCase : List[Any] = name.replace("""auxlayer""" , """auxiliary_head.head""" ) return name def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> Any: '''simple docstring''' for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _UpperCAmelCase : Any = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''' ) _UpperCAmelCase : Union[str, Any] = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict _UpperCAmelCase : Optional[int] = in_proj_weight[: config.hidden_size, :] _UpperCAmelCase : Optional[Any] = in_proj_bias[: config.hidden_size] _UpperCAmelCase : Optional[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _UpperCAmelCase : Tuple = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _UpperCAmelCase : Tuple = in_proj_weight[ -config.hidden_size :, : ] _UpperCAmelCase : Dict = in_proj_bias[-config.hidden_size :] def snake_case_ ( )-> Optional[int]: '''simple docstring''' _UpperCAmelCase : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg""" _UpperCAmelCase : List[str] = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw ) return im @torch.no_grad() def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Dict: '''simple docstring''' _UpperCAmelCase ,_UpperCAmelCase : List[str] = get_dpt_config(lowerCAmelCase_ ) # load original state_dict from URL _UpperCAmelCase : Any = torch.hub.load_state_dict_from_url(lowerCAmelCase_ , map_location="""cpu""" ) # remove certain keys remove_ignore_keys_(lowerCAmelCase_ ) # rename keys for key in state_dict.copy().keys(): _UpperCAmelCase : List[Any] = state_dict.pop(lowerCAmelCase_ ) _UpperCAmelCase : Dict = val # read in qkv matrices read_in_q_k_v(lowerCAmelCase_ , lowerCAmelCase_ ) # load HuggingFace model _UpperCAmelCase : Optional[int] = DPTForSemanticSegmentation(lowerCAmelCase_ ) if """ade""" in checkpoint_url else DPTForDepthEstimation(lowerCAmelCase_ ) model.load_state_dict(lowerCAmelCase_ ) model.eval() # Check outputs on an image _UpperCAmelCase : Tuple = 480 if """ade""" in checkpoint_url else 384 _UpperCAmelCase : List[str] = DPTImageProcessor(size=lowerCAmelCase_ ) _UpperCAmelCase : Optional[Any] = prepare_img() _UpperCAmelCase : Dict = image_processor(lowerCAmelCase_ , return_tensors="""pt""" ) # forward pass _UpperCAmelCase : Optional[Any] = model(**lowerCAmelCase_ ).logits if """ade""" in checkpoint_url else model(**lowerCAmelCase_ ).predicted_depth # Assert logits _UpperCAmelCase : Optional[int] = torch.tensor([[6.3_1_9_9, 6.3_6_2_9, 6.4_1_4_8], [6.3_8_5_0, 6.3_6_1_5, 6.4_1_6_6], [6.3_5_1_9, 6.3_1_7_6, 6.3_5_7_5]] ) if "ade" in checkpoint_url: _UpperCAmelCase : str = torch.tensor([[4.0_4_8_0, 4.2_4_2_0, 4.4_3_6_0], [4.3_1_2_4, 4.5_6_9_3, 4.8_2_6_1], [4.5_7_6_8, 4.8_9_6_5, 5.2_1_6_3]] ) assert outputs.shape == torch.Size(lowerCAmelCase_ ) assert ( torch.allclose(outputs[0, 0, :3, :3] , lowerCAmelCase_ , atol=1e-4 ) if "ade" in checkpoint_url else torch.allclose(outputs[0, :3, :3] , lowerCAmelCase_ ) ) Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCAmelCase_ ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(lowerCAmelCase_ ) if push_to_hub: print("""Pushing model to hub...""" ) model.push_to_hub( repo_path_or_name=Path(lowerCAmelCase_ , lowerCAmelCase_ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=lowerCAmelCase_ , ) image_processor.push_to_hub( repo_path_or_name=Path(lowerCAmelCase_ , lowerCAmelCase_ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=lowerCAmelCase_ , ) if __name__ == "__main__": A_ : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""", type=str, help="""URL of the original DPT checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", action="""store_true""", ) parser.add_argument( """--model_name""", default="""dpt-large""", type=str, help="""Name of the model, in case you're pushing to the hub.""", ) A_ : List[Any] = parser.parse_args() convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
215
0
"""simple docstring""" import os import unittest from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer from transformers.testing_utils import require_jieba, tooslow from ...test_tokenization_common import TokenizerTesterMixin @require_jieba class a ( a__ , unittest.TestCase ): snake_case__ = CpmAntTokenizer snake_case__ = False def UpperCamelCase__ ( self ): """simple docstring""" super().setUp() lowerCAmelCase = [ '<d>', '</d>', '<s>', '</s>', '</_>', '<unk>', '<pad>', '</n>', '我', '是', 'C', 'P', 'M', 'A', 'n', 't', ] lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) @tooslow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = CpmAntTokenizer.from_pretrained('openbmb/cpm-ant-10b' ) lowerCAmelCase = '今天天气真好!' lowerCAmelCase = ['今天', '天气', '真', '好', '!'] lowerCAmelCase = tokenizer.tokenize(_snake_case ) self.assertListEqual(_snake_case , _snake_case ) lowerCAmelCase = '今天天气真好!' lowerCAmelCase = [tokenizer.bos_token] + tokens lowerCAmelCase = [6, 98_02, 1_49_62, 20_82, 8_31, 2_44] self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , _snake_case ) lowerCAmelCase = tokenizer.decode(_snake_case ) self.assertEqual(_snake_case , _snake_case )
309
"""simple docstring""" import warnings from ...utils import logging from .image_processing_poolformer import PoolFormerImageProcessor __UpperCamelCase : Optional[Any] = logging.get_logger(__name__) class a ( a__ ): def __init__( self , *_snake_case , **_snake_case ): """simple docstring""" warnings.warn( 'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use PoolFormerImageProcessor instead.' , _snake_case , ) super().__init__(*_snake_case , **_snake_case )
309
1
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __a :List[Any] = logging.get_logger(__name__) __a :Tuple = { 'asapp/sew-tiny-100k': 'https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json', # See all SEW models at https://huggingface.co/models?filter=sew } class _a ( __SCREAMING_SNAKE_CASE ): """simple docstring""" _lowerCamelCase : int = 'sew' def __init__( self : List[Any] , UpperCAmelCase : Union[str, Any]=32 , UpperCAmelCase : Optional[int]=768 , UpperCAmelCase : str=12 , UpperCAmelCase : Optional[Any]=12 , UpperCAmelCase : List[str]=3072 , UpperCAmelCase : List[str]=2 , UpperCAmelCase : List[Any]="gelu" , UpperCAmelCase : int=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Any=0.1 , UpperCAmelCase : Any=0.0 , UpperCAmelCase : Tuple=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Any=0.02 , UpperCAmelCase : Optional[int]=1E-5 , UpperCAmelCase : Dict="group" , UpperCAmelCase : Any="gelu" , UpperCAmelCase : int=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , UpperCAmelCase : str=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , UpperCAmelCase : Union[str, Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , UpperCAmelCase : Optional[Any]=False , UpperCAmelCase : Union[str, Any]=128 , UpperCAmelCase : List[str]=16 , UpperCAmelCase : Any=True , UpperCAmelCase : Optional[int]=0.05 , UpperCAmelCase : Optional[Any]=10 , UpperCAmelCase : Any=2 , UpperCAmelCase : List[Any]=0.0 , UpperCAmelCase : int=10 , UpperCAmelCase : List[Any]=0 , UpperCAmelCase : List[Any]="mean" , UpperCAmelCase : Union[str, Any]=False , UpperCAmelCase : List[Any]=False , UpperCAmelCase : List[Any]=256 , UpperCAmelCase : Optional[Any]=0 , UpperCAmelCase : List[Any]=1 , UpperCAmelCase : List[Any]=2 , **UpperCAmelCase : Union[str, Any] , ): super().__init__(**_a , pad_token_id=_a , bos_token_id=_a , eos_token_id=_a ) A_ = hidden_size A_ = feat_extract_norm A_ = feat_extract_activation A_ = list(_a ) A_ = list(_a ) A_ = list(_a ) A_ = conv_bias A_ = num_conv_pos_embeddings A_ = num_conv_pos_embedding_groups A_ = len(self.conv_dim ) A_ = num_hidden_layers A_ = intermediate_size A_ = squeeze_factor A_ = hidden_act A_ = num_attention_heads A_ = hidden_dropout A_ = attention_dropout A_ = activation_dropout A_ = feat_proj_dropout A_ = final_dropout A_ = layerdrop A_ = layer_norm_eps A_ = initializer_range A_ = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect." "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`," f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)''' f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 A_ = apply_spec_augment A_ = mask_time_prob A_ = mask_time_length A_ = mask_time_min_masks A_ = mask_feature_prob A_ = mask_feature_length A_ = mask_feature_min_masks # ctc loss A_ = ctc_loss_reduction A_ = ctc_zero_infinity # sequence classification A_ = use_weighted_layer_sum A_ = classifier_proj_size @property def __A ( self : Any ): return functools.reduce(operator.mul , self.conv_stride , 1 )
312
"""simple docstring""" import warnings from ..trainer import Trainer from ..utils import logging lowercase_ = logging.get_logger(__name__) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self , _a=None , **_a ): warnings.warn( '''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` ''' '''instead.''' , _a , ) super().__init__(args=_a , **_a )
45
0
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SegformerConfig, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase_ = logging.get_logger(__name__) def lowerCamelCase ( a_ , a_=False ) -> Tuple: lowerCAmelCase_ = OrderedDict() for key, value in state_dict.items(): if encoder_only and not key.startswith('head' ): lowerCAmelCase_ = 'segformer.encoder.' + key if key.startswith('backbone' ): lowerCAmelCase_ = key.replace('backbone' , 'segformer.encoder' ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 lowerCAmelCase_ = key[key.find('patch_embed' ) + len('patch_embed' )] lowerCAmelCase_ = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(a_ )-1}''' ) if "norm" in key: lowerCAmelCase_ = key.replace('norm' , 'layer_norm' ) if "segformer.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 lowerCAmelCase_ = key[key.find('segformer.encoder.layer_norm' ) + len('segformer.encoder.layer_norm' )] lowerCAmelCase_ = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(a_ )-1}''' ) if "layer_norm1" in key: lowerCAmelCase_ = key.replace('layer_norm1' , 'layer_norm_1' ) if "layer_norm2" in key: lowerCAmelCase_ = key.replace('layer_norm2' , 'layer_norm_2' ) if "block" in key: # replace for example block1 by block.0 lowerCAmelCase_ = key[key.find('block' ) + len('block' )] lowerCAmelCase_ = key.replace(F'''block{idx}''' , F'''block.{int(a_ )-1}''' ) if "attn.q" in key: lowerCAmelCase_ = key.replace('attn.q' , 'attention.self.query' ) if "attn.proj" in key: lowerCAmelCase_ = key.replace('attn.proj' , 'attention.output.dense' ) if "attn" in key: lowerCAmelCase_ = key.replace('attn' , 'attention.self' ) if "fc1" in key: lowerCAmelCase_ = key.replace('fc1' , 'dense1' ) if "fc2" in key: lowerCAmelCase_ = key.replace('fc2' , 'dense2' ) if "linear_pred" in key: lowerCAmelCase_ = key.replace('linear_pred' , 'classifier' ) if "linear_fuse" in key: lowerCAmelCase_ = key.replace('linear_fuse.conv' , 'linear_fuse' ) lowerCAmelCase_ = key.replace('linear_fuse.bn' , 'batch_norm' ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 lowerCAmelCase_ = key[key.find('linear_c' ) + len('linear_c' )] lowerCAmelCase_ = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(a_ )-1}''' ) if key.startswith('head' ): lowerCAmelCase_ = key.replace('head' , 'classifier' ) lowerCAmelCase_ = value return new_state_dict def lowerCamelCase ( a_ , a_ ) -> Union[str, Any]: # for each of the encoder blocks: for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) lowerCAmelCase_ = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.weight''' ) lowerCAmelCase_ = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.bias''' ) # next, add keys and values (in that order) to the state dict lowerCAmelCase_ = kv_weight[ : config.hidden_sizes[i], : ] lowerCAmelCase_ = kv_bias[: config.hidden_sizes[i]] lowerCAmelCase_ = kv_weight[ config.hidden_sizes[i] :, : ] lowerCAmelCase_ = kv_bias[ config.hidden_sizes[i] : ] def lowerCamelCase ( ) -> Optional[int]: lowerCAmelCase_ = 'http://images.cocodataset.org/val2017/000000039769.jpg' lowerCAmelCase_ = Image.open(requests.get(a_ , stream=a_ ).raw ) return image @torch.no_grad() def lowerCamelCase ( a_ , a_ , a_ ) -> int: lowerCAmelCase_ = SegformerConfig() lowerCAmelCase_ = False # set attributes based on model_name lowerCAmelCase_ = 'huggingface/label-files' if "segformer" in model_name: lowerCAmelCase_ = model_name[len('segformer.' ) : len('segformer.' ) + 2] if "ade" in model_name: lowerCAmelCase_ = 150 lowerCAmelCase_ = 'ade20k-id2label.json' lowerCAmelCase_ = (1, 150, 128, 128) elif "city" in model_name: lowerCAmelCase_ = 19 lowerCAmelCase_ = 'cityscapes-id2label.json' lowerCAmelCase_ = (1, 19, 128, 128) else: raise ValueError(F'''Model {model_name} not supported''' ) elif "mit" in model_name: lowerCAmelCase_ = True lowerCAmelCase_ = model_name[4:6] lowerCAmelCase_ = 1_000 lowerCAmelCase_ = 'imagenet-1k-id2label.json' lowerCAmelCase_ = (1, 1_000) else: raise ValueError(F'''Model {model_name} not supported''' ) # set config attributes lowerCAmelCase_ = json.load(open(hf_hub_download(a_ , a_ , repo_type='dataset' ) , 'r' ) ) lowerCAmelCase_ = {int(a_ ): v for k, v in idalabel.items()} lowerCAmelCase_ = idalabel lowerCAmelCase_ = {v: k for k, v in idalabel.items()} if size == "b0": pass elif size == "b1": lowerCAmelCase_ = [64, 128, 320, 512] lowerCAmelCase_ = 256 elif size == "b2": lowerCAmelCase_ = [64, 128, 320, 512] lowerCAmelCase_ = 768 lowerCAmelCase_ = [3, 4, 6, 3] elif size == "b3": lowerCAmelCase_ = [64, 128, 320, 512] lowerCAmelCase_ = 768 lowerCAmelCase_ = [3, 4, 18, 3] elif size == "b4": lowerCAmelCase_ = [64, 128, 320, 512] lowerCAmelCase_ = 768 lowerCAmelCase_ = [3, 8, 27, 3] elif size == "b5": lowerCAmelCase_ = [64, 128, 320, 512] lowerCAmelCase_ = 768 lowerCAmelCase_ = [3, 6, 40, 3] else: raise ValueError(F'''Size {size} not supported''' ) # load image processor (only resize + normalize) lowerCAmelCase_ = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=a_ , align=a_ , do_random_crop=a_ ) # prepare image lowerCAmelCase_ = prepare_img() lowerCAmelCase_ = image_processor(images=a_ , return_tensors='pt' ).pixel_values logger.info(F'''Converting model {model_name}...''' ) # load original state dict if encoder_only: lowerCAmelCase_ = torch.load(a_ , map_location=torch.device('cpu' ) ) else: lowerCAmelCase_ = torch.load(a_ , map_location=torch.device('cpu' ) )['state_dict'] # rename keys lowerCAmelCase_ = rename_keys(a_ , encoder_only=a_ ) if not encoder_only: del state_dict["decode_head.conv_seg.weight"] del state_dict["decode_head.conv_seg.bias"] # key and value matrices need special treatment read_in_k_v(a_ , a_ ) # create HuggingFace model and load state dict if encoder_only: lowerCAmelCase_ = False lowerCAmelCase_ = SegformerForImageClassification(a_ ) else: lowerCAmelCase_ = SegformerForSemanticSegmentation(a_ ) model.load_state_dict(a_ ) model.eval() # forward pass lowerCAmelCase_ = model(a_ ) lowerCAmelCase_ = outputs.logits # set expected_slice based on model name # ADE20k checkpoints if model_name == "segformer.b0.512x512.ade.160k": lowerCAmelCase_ = torch.tensor( [ [[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]], [[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]], [[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]], ] ) elif model_name == "segformer.b1.512x512.ade.160k": lowerCAmelCase_ = torch.tensor( [ [[-7.5_820, -8.7_231, -8.3_215], [-8.0_600, -10.3_529, -10.0_304], [-7.5_208, -9.4_103, -9.6_239]], [[-12.6_918, -13.8_994, -13.7_137], [-13.3_196, -15.7_523, -15.4_789], [-12.9_343, -14.8_757, -14.9_689]], [[-11.1_911, -11.9_421, -11.3_243], [-11.3_342, -13.6_839, -13.3_581], [-10.3_909, -12.1_832, -12.4_858]], ] ) elif model_name == "segformer.b2.512x512.ade.160k": lowerCAmelCase_ = torch.tensor( [ [[-11.8_173, -14.3_850, -16.3_128], [-14.5_648, -16.5_804, -18.6_568], [-14.7_223, -15.7_387, -18.4_218]], [[-15.7_290, -17.9_171, -19.4_423], [-18.3_105, -19.9_448, -21.4_661], [-17.9_296, -18.6_497, -20.7_910]], [[-15.0_783, -17.0_336, -18.2_789], [-16.8_771, -18.6_870, -20.1_612], [-16.2_454, -17.1_426, -19.5_055]], ] ) elif model_name == "segformer.b3.512x512.ade.160k": lowerCAmelCase_ = torch.tensor( [ [[-9.0_878, -10.2_081, -10.1_891], [-9.3_144, -10.7_941, -10.9_843], [-9.2_294, -10.3_855, -10.5_704]], [[-12.2_316, -13.9_068, -13.6_102], [-12.9_161, -14.3_702, -14.3_235], [-12.5_233, -13.7_174, -13.7_932]], [[-14.6_275, -15.2_490, -14.9_727], [-14.3_400, -15.9_687, -16.2_827], [-14.1_484, -15.4_033, -15.8_937]], ] ) elif model_name == "segformer.b4.512x512.ade.160k": lowerCAmelCase_ = torch.tensor( [ [[-12.3_144, -13.2_447, -14.0_802], [-13.3_614, -14.5_816, -15.6_117], [-13.3_340, -14.4_433, -16.2_219]], [[-19.2_781, -20.4_128, -20.7_506], [-20.6_153, -21.6_566, -22.0_998], [-19.9_800, -21.0_430, -22.1_494]], [[-18.8_739, -19.7_804, -21.1_834], [-20.1_233, -21.6_765, -23.2_944], [-20.0_315, -21.2_641, -23.6_944]], ] ) elif model_name == "segformer.b5.640x640.ade.160k": lowerCAmelCase_ = torch.tensor( [ [[-9.5_524, -12.0_835, -11.7_348], [-10.5_229, -13.6_446, -14.5_662], [-9.5_842, -12.8_851, -13.9_414]], [[-15.3_432, -17.5_323, -17.0_818], [-16.3_330, -18.9_255, -19.2_101], [-15.1_340, -17.7_848, -18.3_971]], [[-12.6_072, -14.9_486, -14.6_631], [-13.7_629, -17.0_907, -17.7_745], [-12.7_899, -16.1_695, -17.1_671]], ] ) # Cityscapes checkpoints elif model_name == "segformer.b0.1024x1024.city.160k": lowerCAmelCase_ = torch.tensor( [ [[-11.9_295, -13.4_057, -14.8_106], [-13.3_431, -14.8_179, -15.3_781], [-14.2_836, -15.5_942, -16.1_588]], [[-11.4_906, -12.8_067, -13.6_564], [-13.1_189, -14.0_500, -14.1_543], [-13.8_748, -14.5_136, -14.8_789]], [[0.5_374, 0.1_067, -0.4_742], [0.1_141, -0.2_255, -0.7_099], [-0.3_000, -0.5_924, -1.3_105]], ] ) elif model_name == "segformer.b0.512x1024.city.160k": lowerCAmelCase_ = torch.tensor( [ [[-7.8_217, -9.8_767, -10.1_717], [-9.4_438, -10.9_058, -11.4_047], [-9.7_939, -12.3_495, -12.1_079]], [[-7.1_514, -9.5_336, -10.0_860], [-9.7_776, -11.6_822, -11.8_439], [-10.1_411, -12.7_655, -12.8_972]], [[0.3_021, 0.0_805, -0.2_310], [-0.0_328, -0.1_605, -0.2_714], [-0.1_408, -0.5_477, -0.6_976]], ] ) elif model_name == "segformer.b0.640x1280.city.160k": lowerCAmelCase_ = torch.tensor( [ [ [-1.1372e01, -1.2787e01, -1.3477e01], [-1.2536e01, -1.4194e01, -1.4409e01], [-1.3217e01, -1.4888e01, -1.5327e01], ], [ [-1.4791e01, -1.7122e01, -1.8277e01], [-1.7163e01, -1.9192e01, -1.9533e01], [-1.7897e01, -1.9991e01, -2.0315e01], ], [ [7.6723e-01, 4.1921e-01, -7.7878e-02], [4.7772e-01, 9.5557e-03, -2.8082e-01], [3.6032e-01, -2.4826e-01, -5.1168e-01], ], ] ) elif model_name == "segformer.b0.768x768.city.160k": lowerCAmelCase_ = torch.tensor( [ [[-9.4_959, -11.3_087, -11.7_479], [-11.0_025, -12.6_540, -12.3_319], [-11.4_064, -13.0_487, -12.9_905]], [[-9.8_905, -11.3_084, -12.0_854], [-11.1_726, -12.7_698, -12.9_583], [-11.5_985, -13.3_278, -14.1_774]], [[0.2_213, 0.0_192, -0.2_466], [-0.1_731, -0.4_213, -0.4_874], [-0.3_126, -0.6_541, -1.1_389]], ] ) elif model_name == "segformer.b1.1024x1024.city.160k": lowerCAmelCase_ = torch.tensor( [ [[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]], [[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]], [[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]], ] ) elif model_name == "segformer.b2.1024x1024.city.160k": lowerCAmelCase_ = torch.tensor( [ [[-16.0_976, -16.4_856, -17.3_962], [-16.6_234, -19.0_342, -19.7_685], [-16.0_900, -18.0_661, -19.1_180]], [[-18.4_750, -18.8_488, -19.5_074], [-19.4_030, -22.1_570, -22.5_977], [-19.1_191, -20.8_486, -22.3_783]], [[-4.5_178, -5.5_037, -6.5_109], [-5.0_884, -7.2_174, -8.0_334], [-4.4_156, -5.8_117, -7.2_970]], ] ) elif model_name == "segformer.b3.1024x1024.city.160k": lowerCAmelCase_ = torch.tensor( [ [[-14.2_081, -14.4_732, -14.1_977], [-14.5_867, -16.4_423, -16.6_356], [-13.4_441, -14.9_685, -16.8_696]], [[-14.4_576, -14.7_073, -15.0_451], [-15.0_816, -17.6_237, -17.9_873], [-14.4_213, -16.0_199, -18.5_992]], [[-4.7_349, -4.9_588, -5.0_966], [-4.3_210, -6.9_325, -7.2_591], [-3.4_312, -4.7_484, -7.1_917]], ] ) elif model_name == "segformer.b4.1024x1024.city.160k": lowerCAmelCase_ = torch.tensor( [ [[-11.7_737, -11.9_526, -11.3_273], [-13.6_692, -14.4_574, -13.8_878], [-13.8_937, -14.6_924, -15.9_345]], [[-14.6_706, -14.5_330, -14.1_306], [-16.1_502, -16.8_180, -16.4_269], [-16.8_338, -17.8_939, -20.1_746]], [[1.0_491, 0.8_289, 1.0_310], [1.1_044, 0.5_219, 0.8_055], [1.0_899, 0.6_926, 0.5_590]], ] ) elif model_name == "segformer.b5.1024x1024.city.160k": lowerCAmelCase_ = torch.tensor( [ [[-12.5_641, -13.4_777, -13.0_684], [-13.9_587, -15.8_983, -16.6_557], [-13.3_109, -15.7_350, -16.3_141]], [[-14.7_074, -15.4_352, -14.5_944], [-16.6_353, -18.1_663, -18.6_120], [-15.1_702, -18.0_329, -18.1_547]], [[-1.7_990, -2.0_951, -1.7_784], [-2.6_397, -3.8_245, -3.9_686], [-1.5_264, -2.8_126, -2.9_316]], ] ) else: lowerCAmelCase_ = logits.argmax(-1 ).item() print('Predicted class:' , model.config.idalabel[predicted_class_idx] ) # verify logits if not encoder_only: assert logits.shape == expected_shape assert torch.allclose(logits[0, :3, :3, :3] , a_ , atol=1e-2 ) # finally, save model and image processor logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(a_ ).mkdir(exist_ok=a_ ) model.save_pretrained(a_ ) image_processor.save_pretrained(a_ ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() parser.add_argument( """--model_name""", default="""segformer.b0.512x512.ade.160k""", type=str, help="""Name of the model you'd like to convert.""", ) parser.add_argument( """--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file).""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) lowerCamelCase_ = parser.parse_args() convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
14
def lowerCamelCase ( a_ ) -> bool: lowerCAmelCase_ = set() # To detect a back edge, keep track of vertices currently in the recursion stack lowerCAmelCase_ = set() return any( node not in visited and depth_first_search(a_ , a_ , a_ , a_ ) for node in graph ) def lowerCamelCase ( a_ , a_ , a_ , a_ ) -> bool: visited.add(a_ ) rec_stk.add(a_ ) for node in graph[vertex]: if node not in visited: if depth_first_search(a_ , a_ , a_ , a_ ): return True elif node in rec_stk: return True # The node needs to be removed from recursion stack before function ends rec_stk.remove(a_ ) return False if __name__ == "__main__": from doctest import testmod testmod()
14
1
from __future__ import annotations def _lowerCAmelCase ( lowerCAmelCase_ :list[int] , lowerCAmelCase_ :int )->list[list[int]]: '''simple docstring''' snake_case_ = [] snake_case_ = [] snake_case_ = 0 snake_case_ = sum(SCREAMING_SNAKE_CASE_ ) create_state_space_tree(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return result def _lowerCAmelCase ( lowerCAmelCase_ :list[int] , lowerCAmelCase_ :int , lowerCAmelCase_ :int , lowerCAmelCase_ :list[int] , lowerCAmelCase_ :list[list[int]] , lowerCAmelCase_ :int , )->None: '''simple docstring''' if sum(SCREAMING_SNAKE_CASE_ ) > max_sum or (remaining_nums_sum + sum(SCREAMING_SNAKE_CASE_ )) < max_sum: return if sum(SCREAMING_SNAKE_CASE_ ) == max_sum: result.append(SCREAMING_SNAKE_CASE_ ) return for index in range(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) ): create_state_space_tree( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , index + 1 , [*path, nums[index]] , SCREAMING_SNAKE_CASE_ , remaining_nums_sum - nums[index] , ) SCREAMING_SNAKE_CASE :Any = [3, 34, 4, 12, 5, 2] SCREAMING_SNAKE_CASE :Dict = 9 SCREAMING_SNAKE_CASE :List[Any] = generate_sum_of_subsets_soln(nums, max_sum) print(*result)
159
"""simple docstring""" from math import isclose, sqrt def lowercase (SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float ) -> tuple[float, float, float]: SCREAMING_SNAKE_CASE = point_y / 4 / point_x SCREAMING_SNAKE_CASE = 2 * normal_gradient / (1 + normal_gradient * normal_gradient) SCREAMING_SNAKE_CASE = (1 - normal_gradient * normal_gradient) / ( 1 + normal_gradient * normal_gradient ) SCREAMING_SNAKE_CASE = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient) # to find the next point, solve the simultaeneous equations: # y^2 + 4x^2 = 100 # y - b = m * (x - a) # ==> A x^2 + B x + C = 0 SCREAMING_SNAKE_CASE = outgoing_gradient**2 + 4 SCREAMING_SNAKE_CASE = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x) SCREAMING_SNAKE_CASE = (point_y - outgoing_gradient * point_x) ** 2 - 1_00 SCREAMING_SNAKE_CASE = ( -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) SCREAMING_SNAKE_CASE = ( -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) # two solutions, one of which is our input point SCREAMING_SNAKE_CASE = x_minus if isclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else x_plus SCREAMING_SNAKE_CASE = point_y + outgoing_gradient * (next_x - point_x) return next_x, next_y, outgoing_gradient def lowercase (SCREAMING_SNAKE_CASE_ : float = 1.4 , SCREAMING_SNAKE_CASE_ : float = -9.6 ) -> int: SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = first_x_coord SCREAMING_SNAKE_CASE = first_y_coord SCREAMING_SNAKE_CASE = (10.1 - point_y) / (0.0 - point_x) while not (-0.01 <= point_x <= 0.01 and point_y > 0): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = next_point(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) num_reflections += 1 return num_reflections if __name__ == "__main__": print(f'''{solution() = }''')
113
0
'''simple docstring''' import argparse import os import re import packaging.version __snake_case : Tuple = 'examples/' __snake_case : List[str] = { 'examples': (re.compile(R'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'), 'init': (re.compile(R'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'), 'setup': (re.compile(R'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), R'\1version="VERSION",'), 'doc': (re.compile(R'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'), } __snake_case : Dict = { 'init': 'src/diffusers/__init__.py', 'setup': 'setup.py', } __snake_case : Optional[int] = 'README.md' def _UpperCAmelCase ( _UpperCamelCase : Optional[int], _UpperCamelCase : Any, _UpperCamelCase : List[str] ) -> Optional[int]: with open(_UpperCamelCase, '''r''', encoding='''utf-8''', newline='''\n''' ) as f: A_ = f.read() A_ ,A_ = REPLACE_PATTERNS[pattern] A_ = replace.replace('''VERSION''', _UpperCamelCase ) A_ = re_pattern.sub(_UpperCamelCase, _UpperCamelCase ) with open(_UpperCamelCase, '''w''', encoding='''utf-8''', newline='''\n''' ) as f: f.write(_UpperCamelCase ) def _UpperCAmelCase ( _UpperCamelCase : Optional[int] ) -> str: for folder, directories, fnames in os.walk(_UpperCamelCase ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('''research_projects''' ) if "legacy" in directories: directories.remove('''legacy''' ) for fname in fnames: if fname.endswith('''.py''' ): update_version_in_file(os.path.join(_UpperCamelCase, _UpperCamelCase ), _UpperCamelCase, pattern='''examples''' ) def _UpperCAmelCase ( _UpperCamelCase : Optional[Any], _UpperCamelCase : List[str]=False ) -> List[Any]: for pattern, fname in REPLACE_FILES.items(): update_version_in_file(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) if not patch: update_version_in_examples(_UpperCamelCase ) def _UpperCAmelCase ( ) -> int: A_ = '''🤗 Transformers currently provides the following architectures''' A_ = '''1. Want to contribute a new model?''' with open(_UpperCamelCase, '''r''', encoding='''utf-8''', newline='''\n''' ) as f: A_ = f.readlines() # Find the start of the list. A_ = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 A_ = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith('''1.''' ): A_ = lines[index].replace( '''https://huggingface.co/docs/diffusers/main/model_doc''', '''https://huggingface.co/docs/diffusers/model_doc''', ) index += 1 with open(_UpperCamelCase, '''w''', encoding='''utf-8''', newline='''\n''' ) as f: f.writelines(_UpperCamelCase ) def _UpperCAmelCase ( ) -> str: with open(REPLACE_FILES['''init'''], '''r''' ) as f: A_ = f.read() A_ = REPLACE_PATTERNS['''init'''][0].search(_UpperCamelCase ).groups()[0] return packaging.version.parse(_UpperCamelCase ) def _UpperCAmelCase ( _UpperCamelCase : List[Any]=False ) -> Tuple: A_ = get_version() if patch and default_version.is_devrelease: raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' ) if default_version.is_devrelease: A_ = default_version.base_version elif patch: A_ = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}''' else: A_ = F'''{default_version.major}.{default_version.minor + 1}.0''' # Now let's ask nicely if that's the right one. A_ = input(F'''Which version are you releasing? [{default_version}]''' ) if len(_UpperCamelCase ) == 0: A_ = default_version print(F'''Updating version to {version}.''' ) global_version_update(_UpperCamelCase, patch=_UpperCamelCase ) def _UpperCAmelCase ( ) -> str: A_ = get_version() A_ = F'''{current_version.major}.{current_version.minor + 1}.0.dev0''' A_ = current_version.base_version # Check with the user we got that right. A_ = input(F'''Which version are we developing now? [{dev_version}]''' ) if len(_UpperCamelCase ) == 0: A_ = dev_version print(F'''Updating version to {version}.''' ) global_version_update(_UpperCamelCase ) # print("Cleaning main README, don't forget to run `make fix-copies`.") # clean_main_ref_in_model_list() if __name__ == "__main__": __snake_case : Tuple = argparse.ArgumentParser() parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.') parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.') __snake_case : str = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('Nothing to do after a patch :-)') else: post_release_work()
18
'''simple docstring''' import tempfile import unittest import numpy as np from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import BertConfig, is_flax_available from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax if is_flax_available(): import os from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FlaxBertModel __snake_case : str = '0.12' # assumed parallelism: 8 @require_flax @is_staging_test class __UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @classmethod def __A ( cls ) -> Dict: A_ = TOKEN HfFolder.save_token(_SCREAMING_SNAKE_CASE ) @classmethod def __A ( cls ) -> Optional[int]: try: delete_repo(token=cls._token , repo_id='''test-model-flax''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' ) except HTTPError: pass def __A ( self ) -> str: A_ = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE ) model.push_to_hub('''test-model-flax''' , use_auth_token=self._token ) A_ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' ) A_ = flatten_dict(unfreeze(model.params ) ) A_ = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): A_ = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' ) # Reset repo delete_repo(token=self._token , repo_id='''test-model-flax''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(_SCREAMING_SNAKE_CASE , repo_id='''test-model-flax''' , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token ) A_ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' ) A_ = flatten_dict(unfreeze(model.params ) ) A_ = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): A_ = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' ) def __A ( self ) -> List[str]: A_ = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE ) model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token ) A_ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' ) A_ = flatten_dict(unfreeze(model.params ) ) A_ = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): A_ = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( _SCREAMING_SNAKE_CASE , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token ) A_ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' ) A_ = flatten_dict(unfreeze(model.params ) ) A_ = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): A_ = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' ) def _UpperCAmelCase ( _UpperCamelCase : Union[str, Any], _UpperCamelCase : Tuple ) -> Dict: A_ = True A_ = flatten_dict(modela.params ) A_ = flatten_dict(modela.params ) for key in flat_params_a.keys(): if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1E-4: A_ = False return models_are_equal @require_flax class __UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __A ( self ) -> List[str]: A_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE ) A_ = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) with self.assertRaises(_SCREAMING_SNAKE_CASE ): A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE ) A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE ) self.assertTrue(check_models_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) def __A ( self ) -> List[Any]: A_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE ) A_ = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , max_shard_size='''10KB''' ) with self.assertRaises(_SCREAMING_SNAKE_CASE ): A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE ) A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE ) self.assertTrue(check_models_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) def __A ( self ) -> Dict: A_ = '''bert''' A_ = '''hf-internal-testing/tiny-random-bert-subfolder''' with self.assertRaises(_SCREAMING_SNAKE_CASE ): A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE ) A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) def __A ( self ) -> Optional[Any]: A_ = '''bert''' A_ = '''hf-internal-testing/tiny-random-bert-sharded-subfolder''' with self.assertRaises(_SCREAMING_SNAKE_CASE ): A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE ) A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
18
1
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from .config import config_command_parser from .config_args import default_config_file, load_config_from_file # noqa: F401 from .default import default_command_parser from .update import update_command_parser def _a ( UpperCAmelCase=None ) -> int: """simple docstring""" lowerCamelCase__ : int = argparse.ArgumentParser(add_help=UpperCAmelCase , allow_abbrev=UpperCAmelCase ) # The main config parser lowerCamelCase__ : str = config_command_parser(UpperCAmelCase ) # The subparser to add commands to lowerCamelCase__ : List[str] = config_parser.add_subparsers(title='''subcommands''' , dest='''subcommand''' ) # Then add other parsers with the parent parser default_command_parser(UpperCAmelCase , parents=[parent_parser] ) update_command_parser(UpperCAmelCase , parents=[parent_parser] ) return config_parser def _a ( ) -> Union[str, Any]: """simple docstring""" lowerCamelCase__ : Optional[Any] = get_config_parser() lowerCamelCase__ : Optional[int] = config_parser.parse_args() if not hasattr(UpperCAmelCase , '''func''' ): config_parser.print_help() exit(1 ) # Run args.func(UpperCAmelCase ) if __name__ == "__main__": main()
142
from typing import Any, Dict, Optional import torch import torch.nn.functional as F from torch import nn from ..utils import maybe_allow_in_graph from .activations import get_activation from .attention_processor import Attention from .embeddings import CombinedTimestepLabelEmbeddings @maybe_allow_in_graph class __SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self : Dict , A : int , A : int , A : int , A : Union[str, Any]=0.0 , A : Optional[int] = None , A : str = "geglu" , A : Optional[int] = None , A : bool = False , A : bool = False , A : bool = False , A : bool = False , A : bool = True , A : str = "layer_norm" , A : bool = False , ) ->Any: super().__init__() lowerCamelCase__ : int = only_cross_attention lowerCamelCase__ : Optional[Any] = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm_zero''' lowerCamelCase__ : Union[str, Any] = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm''' if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( F"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to" F" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}." ) # Define 3 blocks. Each block has its own normalization layer. # 1. Self-Attn if self.use_ada_layer_norm: lowerCamelCase__ : Optional[Any] = AdaLayerNorm(A , A ) elif self.use_ada_layer_norm_zero: lowerCamelCase__ : int = AdaLayerNormZero(A , A ) else: lowerCamelCase__ : Dict = nn.LayerNorm(A , elementwise_affine=A ) lowerCamelCase__ : Any = Attention( query_dim=A , heads=A , dim_head=A , dropout=A , bias=A , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=A , ) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. lowerCamelCase__ : Tuple = ( AdaLayerNorm(A , A ) if self.use_ada_layer_norm else nn.LayerNorm(A , elementwise_affine=A ) ) lowerCamelCase__ : int = Attention( query_dim=A , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=A , dim_head=A , dropout=A , bias=A , upcast_attention=A , ) # is self-attn if encoder_hidden_states is none else: lowerCamelCase__ : Dict = None lowerCamelCase__ : Tuple = None # 3. Feed-forward lowerCamelCase__ : Optional[int] = nn.LayerNorm(A , elementwise_affine=A ) lowerCamelCase__ : Union[str, Any] = FeedForward(A , dropout=A , activation_fn=A , final_dropout=A ) # let chunk size default to None lowerCamelCase__ : str = None lowerCamelCase__ : Tuple = 0 def __lowerCamelCase ( self : Any , A : Optional[int] , A : int ) ->List[str]: # Sets chunk feed-forward lowerCamelCase__ : List[Any] = chunk_size lowerCamelCase__ : List[str] = dim def __lowerCamelCase ( self : str , A : torch.FloatTensor , A : Optional[torch.FloatTensor] = None , A : Optional[torch.FloatTensor] = None , A : Optional[torch.FloatTensor] = None , A : Optional[torch.LongTensor] = None , A : Dict[str, Any] = None , A : Optional[torch.LongTensor] = None , ) ->Tuple: # Notice that normalization is always applied before the real computation in the following blocks. # 1. Self-Attention if self.use_ada_layer_norm: lowerCamelCase__ : Union[str, Any] = self.norma(A , A ) elif self.use_ada_layer_norm_zero: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any = self.norma( A , A , A , hidden_dtype=hidden_states.dtype ) else: lowerCamelCase__ : List[str] = self.norma(A ) lowerCamelCase__ : str = cross_attention_kwargs if cross_attention_kwargs is not None else {} lowerCamelCase__ : Any = self.attna( A , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=A , **A , ) if self.use_ada_layer_norm_zero: lowerCamelCase__ : Any = gate_msa.unsqueeze(1 ) * attn_output lowerCamelCase__ : Optional[int] = attn_output + hidden_states # 2. Cross-Attention if self.attna is not None: lowerCamelCase__ : int = ( self.norma(A , A ) if self.use_ada_layer_norm else self.norma(A ) ) lowerCamelCase__ : int = self.attna( A , encoder_hidden_states=A , attention_mask=A , **A , ) lowerCamelCase__ : Any = attn_output + hidden_states # 3. Feed-forward lowerCamelCase__ : Union[str, Any] = self.norma(A ) if self.use_ada_layer_norm_zero: lowerCamelCase__ : Optional[int] = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self._chunk_size is not None: # "feed_forward_chunk_size" can be used to save memory if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0: raise ValueError( F"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`." ) lowerCamelCase__ : Optional[int] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size lowerCamelCase__ : Optional[int] = torch.cat( [self.ff(A ) for hid_slice in norm_hidden_states.chunk(A , dim=self._chunk_dim )] , dim=self._chunk_dim , ) else: lowerCamelCase__ : Optional[int] = self.ff(A ) if self.use_ada_layer_norm_zero: lowerCamelCase__ : Optional[Any] = gate_mlp.unsqueeze(1 ) * ff_output lowerCamelCase__ : List[Any] = ff_output + hidden_states return hidden_states class __SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self : Any , A : int , A : Optional[int] = None , A : int = 4 , A : float = 0.0 , A : str = "geglu" , A : bool = False , ) ->int: super().__init__() lowerCamelCase__ : List[Any] = int(dim * mult ) lowerCamelCase__ : List[Any] = dim_out if dim_out is not None else dim if activation_fn == "gelu": lowerCamelCase__ : int = GELU(A , A ) if activation_fn == "gelu-approximate": lowerCamelCase__ : Optional[int] = GELU(A , A , approximate='''tanh''' ) elif activation_fn == "geglu": lowerCamelCase__ : Any = GEGLU(A , A ) elif activation_fn == "geglu-approximate": lowerCamelCase__ : int = ApproximateGELU(A , A ) lowerCamelCase__ : Union[str, Any] = nn.ModuleList([] ) # project in self.net.append(A ) # project dropout self.net.append(nn.Dropout(A ) ) # project out self.net.append(nn.Linear(A , A ) ) # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout if final_dropout: self.net.append(nn.Dropout(A ) ) def __lowerCamelCase ( self : Dict , A : List[Any] ) ->Optional[Any]: for module in self.net: lowerCamelCase__ : int = module(A ) return hidden_states class __SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self : Tuple , A : int , A : int , A : str = "none" ) ->Optional[Any]: super().__init__() lowerCamelCase__ : List[Any] = nn.Linear(A , A ) lowerCamelCase__ : Any = approximate def __lowerCamelCase ( self : List[str] , A : Tuple ) ->str: if gate.device.type != "mps": return F.gelu(A , approximate=self.approximate ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype ) def __lowerCamelCase ( self : List[str] , A : str ) ->Optional[int]: lowerCamelCase__ : List[str] = self.proj(A ) lowerCamelCase__ : Optional[int] = self.gelu(A ) return hidden_states class __SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self : Tuple , A : int , A : int ) ->Dict: super().__init__() lowerCamelCase__ : Optional[Any] = nn.Linear(A , dim_out * 2 ) def __lowerCamelCase ( self : List[Any] , A : List[Any] ) ->Tuple: if gate.device.type != "mps": return F.gelu(A ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype ) def __lowerCamelCase ( self : Any , A : Union[str, Any] ) ->Any: lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.proj(A ).chunk(2 , dim=-1 ) return hidden_states * self.gelu(A ) class __SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self : Any , A : int , A : int ) ->str: super().__init__() lowerCamelCase__ : Optional[int] = nn.Linear(A , A ) def __lowerCamelCase ( self : Union[str, Any] , A : Dict ) ->Optional[Any]: lowerCamelCase__ : List[str] = self.proj(A ) return x * torch.sigmoid(1.7_02 * x ) class __SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self : int , A : Dict , A : Optional[Any] ) ->str: super().__init__() lowerCamelCase__ : List[str] = nn.Embedding(A , A ) lowerCamelCase__ : str = nn.SiLU() lowerCamelCase__ : int = nn.Linear(A , embedding_dim * 2 ) lowerCamelCase__ : Optional[Any] = nn.LayerNorm(A , elementwise_affine=A ) def __lowerCamelCase ( self : int , A : Union[str, Any] , A : Union[str, Any] ) ->Union[str, Any]: lowerCamelCase__ : Union[str, Any] = self.linear(self.silu(self.emb(A ) ) ) lowerCamelCase__ , lowerCamelCase__ : List[str] = torch.chunk(A , 2 ) lowerCamelCase__ : Any = self.norm(A ) * (1 + scale) + shift return x class __SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self : str , A : Optional[Any] , A : int ) ->str: super().__init__() lowerCamelCase__ : Union[str, Any] = CombinedTimestepLabelEmbeddings(A , A ) lowerCamelCase__ : int = nn.SiLU() lowerCamelCase__ : List[str] = nn.Linear(A , 6 * embedding_dim , bias=A ) lowerCamelCase__ : str = nn.LayerNorm(A , elementwise_affine=A , eps=1e-6 ) def __lowerCamelCase ( self : List[str] , A : Any , A : List[Any] , A : Tuple , A : Dict=None ) ->Union[str, Any]: lowerCamelCase__ : List[Any] = self.linear(self.silu(self.emb(A , A , hidden_dtype=A ) ) ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = emb.chunk(6 , dim=1 ) lowerCamelCase__ : List[Any] = self.norm(A ) * (1 + scale_msa[:, None]) + shift_msa[:, None] return x, gate_msa, shift_mlp, scale_mlp, gate_mlp class __SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self : Any , A : int , A : int , A : int , A : Optional[str] = None , A : float = 1e-5 ) ->Any: super().__init__() lowerCamelCase__ : int = num_groups lowerCamelCase__ : List[str] = eps if act_fn is None: lowerCamelCase__ : Tuple = None else: lowerCamelCase__ : Dict = get_activation(A ) lowerCamelCase__ : Any = nn.Linear(A , out_dim * 2 ) def __lowerCamelCase ( self : List[str] , A : Optional[int] , A : str ) ->Tuple: if self.act: lowerCamelCase__ : Union[str, Any] = self.act(A ) lowerCamelCase__ : Optional[Any] = self.linear(A ) lowerCamelCase__ : Optional[Any] = emb[:, :, None, None] lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = emb.chunk(2 , dim=1 ) lowerCamelCase__ : str = F.group_norm(A , self.num_groups , eps=self.eps ) lowerCamelCase__ : Dict = x * (1 + scale) + shift return x
142
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a__ : Tuple = {'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Dict = [ '''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''WavLMForAudioFrameClassification''', '''WavLMForCTC''', '''WavLMForSequenceClassification''', '''WavLMForXVector''', '''WavLMModel''', '''WavLMPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavlm import ( WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST, WavLMForAudioFrameClassification, WavLMForCTC, WavLMForSequenceClassification, WavLMForXVector, WavLMModel, WavLMPreTrainedModel, ) else: import sys a__ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
19
import math from collections.abc import Iterator from itertools import takewhile def UpperCAmelCase_( a__ ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(a__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def UpperCAmelCase_( ): """simple docstring""" SCREAMING_SNAKE_CASE : str = 2 while True: if is_prime(a__ ): yield num num += 1 def UpperCAmelCase_( a__ = 2_000_000 ): """simple docstring""" return sum(takewhile(lambda a__ : x < n , prime_generator() ) ) if __name__ == "__main__": print(F"{solution() = }")
19
1
import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class _lowercase ( unittest.TestCase): """simple docstring""" def __init__( self : Dict , __lowerCamelCase : int , __lowerCamelCase : Dict = True , __lowerCamelCase : str = None , __lowerCamelCase : List[str] = 32 , __lowerCamelCase : Any = True , __lowerCamelCase : int = 1 / 255 , __lowerCamelCase : Optional[int] = True , __lowerCamelCase : Tuple = True , __lowerCamelCase : Dict = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , __lowerCamelCase : List[str] = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , __lowerCamelCase : Union[str, Any] = True , __lowerCamelCase : Tuple=7 , __lowerCamelCase : Optional[Any]=30 , __lowerCamelCase : str=400 , __lowerCamelCase : List[Any]=3 , ): '''simple docstring''' lowerCamelCase__ : str = parent lowerCamelCase__ : List[str] = do_resize lowerCamelCase__ : Optional[Any] = size if size is not None else {"shortest_edge": 288} lowerCamelCase__ : str = size_divisor lowerCamelCase__ : List[str] = do_rescale lowerCamelCase__ : str = rescale_factor lowerCamelCase__ : List[Any] = do_normalize lowerCamelCase__ : List[Any] = do_center_crop lowerCamelCase__ : str = image_mean lowerCamelCase__ : Union[str, Any] = image_std lowerCamelCase__ : int = do_pad lowerCamelCase__ : Optional[Any] = batch_size lowerCamelCase__ : Optional[int] = num_channels lowerCamelCase__ : Optional[Any] = min_resolution lowerCamelCase__ : int = max_resolution def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str=False ): '''simple docstring''' if not batched: lowerCamelCase__ : Optional[Any] = self.size["shortest_edge"] lowerCamelCase__ : int = image_inputs[0] if isinstance(__lowerCamelCase , Image.Image ): lowerCamelCase__ , lowerCamelCase__ : Any = image.size else: lowerCamelCase__ , lowerCamelCase__ : str = image.shape[1], image.shape[2] lowerCamelCase__ : Optional[int] = size / min(__lowerCamelCase , __lowerCamelCase ) if h < w: lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = size, scale * w else: lowerCamelCase__ , lowerCamelCase__ : Tuple = scale * h, size lowerCamelCase__ : Optional[int] = int((1333 / 800) * size ) if max(__lowerCamelCase , __lowerCamelCase ) > max_size: lowerCamelCase__ : Dict = max_size / max(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : str = newh * scale lowerCamelCase__ : Any = neww * scale lowerCamelCase__ , lowerCamelCase__ : str = int(newh + 0.5 ), int(neww + 0.5 ) lowerCamelCase__ , lowerCamelCase__ : int = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: lowerCamelCase__ : List[Any] = [] for image in image_inputs: lowerCamelCase__ , lowerCamelCase__ : Tuple = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) lowerCamelCase__ : int = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0] lowerCamelCase__ : str = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class _lowercase ( lowercase_ , unittest.TestCase): """simple docstring""" A__ = BridgeTowerImageProcessor if is_vision_available() else None def lowerCAmelCase ( self : Tuple ): '''simple docstring''' lowerCamelCase__ : str = BridgeTowerImageProcessingTester(self ) @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) ) self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) ) self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) ) self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) ) self.assertTrue(hasattr(__lowerCamelCase , "size" ) ) self.assertTrue(hasattr(__lowerCamelCase , "size_divisor" ) ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' pass def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , Image.Image ) # Test not batched input lowerCamelCase__ : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.image_processor_tester.get_expected_values(__lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCamelCase__ : int = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values lowerCamelCase__ , lowerCamelCase__ : Any = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' lowerCamelCase__ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , np.ndarray ) # Test not batched input lowerCamelCase__ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values lowerCamelCase__ , lowerCamelCase__ : Dict = self.image_processor_tester.get_expected_values(__lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCamelCase__ : Union[str, Any] = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , torch.Tensor ) # Test not batched input lowerCamelCase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.image_processor_tester.get_expected_values(__lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCamelCase__ : Optional[Any] = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
184
class lowercase : def __init__( self , snake_case , snake_case , snake_case ): snake_case_ = name snake_case_ = value snake_case_ = weight def __repr__( self ): return F'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})''' def a ( self ): return self.value def a ( self ): return self.name def a ( self ): return self.weight def a ( self ): return self.value / self.weight def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = [] for i in range(len(UpperCamelCase__ ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = sorted(UpperCamelCase__ , key=UpperCamelCase__ , reverse=UpperCamelCase__ ) snake_case_ = [] snake_case_ , snake_case_ = 0.0, 0.0 for i in range(len(UpperCamelCase__ ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def __lowerCamelCase ( ): '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
285
0
"""simple docstring""" SCREAMING_SNAKE_CASE : int = 2_5_6 # Modulus to hash a string SCREAMING_SNAKE_CASE : List[Any] = 1_0_0_0_0_0_3 def __UpperCAmelCase ( snake_case_ : str , snake_case_ : str ) -> bool: """simple docstring""" _lowerCAmelCase = len(lowerCAmelCase__ ) _lowerCAmelCase = len(lowerCAmelCase__ ) if p_len > t_len: return False _lowerCAmelCase = 0 _lowerCAmelCase = 0 _lowerCAmelCase = 1 # Calculating the hash of pattern and substring of text for i in range(lowerCAmelCase__ ): _lowerCAmelCase = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus _lowerCAmelCase = (ord(text[i] ) + text_hash * alphabet_size) % modulus if i == p_len - 1: continue _lowerCAmelCase = (modulus_power * alphabet_size) % modulus for i in range(0 , t_len - p_len + 1 ): if text_hash == p_hash and text[i : i + p_len] == pattern: return True if i == t_len - p_len: continue # Calculate the https://en.wikipedia.org/wiki/Rolling_hash _lowerCAmelCase = ( (text_hash - ord(text[i] ) * modulus_power) * alphabet_size + ord(text[i + p_len] ) ) % modulus return False def __UpperCAmelCase ( ) -> None: """simple docstring""" _lowerCAmelCase = """abc1abc12""" _lowerCAmelCase = """alskfjaldsabc1abc1abc12k23adsfabcabc""" _lowerCAmelCase = """alskfjaldsk23adsfabcabc""" assert rabin_karp(lowerCAmelCase__ , lowerCAmelCase__ ) and not rabin_karp(lowerCAmelCase__ , lowerCAmelCase__ ) # Test 2) _lowerCAmelCase = """ABABX""" _lowerCAmelCase = """ABABZABABYABABX""" assert rabin_karp(lowerCAmelCase__ , lowerCAmelCase__ ) # Test 3) _lowerCAmelCase = """AAAB""" _lowerCAmelCase = """ABAAAAAB""" assert rabin_karp(lowerCAmelCase__ , lowerCAmelCase__ ) # Test 4) _lowerCAmelCase = """abcdabcy""" _lowerCAmelCase = """abcxabcdabxabcdabcdabcy""" assert rabin_karp(lowerCAmelCase__ , lowerCAmelCase__ ) # Test 5) _lowerCAmelCase = """Lü""" _lowerCAmelCase = """Lüsai""" assert rabin_karp(lowerCAmelCase__ , lowerCAmelCase__ ) _lowerCAmelCase = """Lue""" assert not rabin_karp(lowerCAmelCase__ , lowerCAmelCase__ ) print("""Success.""" ) if __name__ == "__main__": test_rabin_karp()
361
"""simple docstring""" import math def __UpperCAmelCase ( snake_case_ : int ) -> list[int]: """simple docstring""" _lowerCAmelCase = [] _lowerCAmelCase = 2 _lowerCAmelCase = int(math.sqrt(snake_case_ ) ) # Size of every segment _lowerCAmelCase = [True] * (end + 1) _lowerCAmelCase = [] while start <= end: if temp[start] is True: in_prime.append(snake_case_ ) for i in range(start * start , end + 1 , snake_case_ ): _lowerCAmelCase = False start += 1 prime += in_prime _lowerCAmelCase = end + 1 _lowerCAmelCase = min(2 * end , snake_case_ ) while low <= n: _lowerCAmelCase = [True] * (high - low + 1) for each in in_prime: _lowerCAmelCase = math.floor(low / each ) * each if t < low: t += each for j in range(snake_case_ , high + 1 , snake_case_ ): _lowerCAmelCase = False for j in range(len(snake_case_ ) ): if temp[j] is True: prime.append(j + low ) _lowerCAmelCase = high + 1 _lowerCAmelCase = min(high + end , snake_case_ ) return prime print(sieve(1_0**6))
317
0
UpperCAmelCase : Optional[Any] ={ """Pillow""": """Pillow<10.0.0""", """accelerate""": """accelerate>=0.20.3""", """av""": """av==9.2.0""", """beautifulsoup4""": """beautifulsoup4""", """black""": """black~=23.1""", """codecarbon""": """codecarbon==1.2.0""", """cookiecutter""": """cookiecutter==1.7.3""", """dataclasses""": """dataclasses""", """datasets""": """datasets!=2.5.0""", """decord""": """decord==0.6.0""", """deepspeed""": """deepspeed>=0.9.3""", """diffusers""": """diffusers""", """dill""": """dill<0.3.5""", """evaluate""": """evaluate>=0.2.0""", """fairscale""": """fairscale>0.3""", """faiss-cpu""": """faiss-cpu""", """fastapi""": """fastapi""", """filelock""": """filelock""", """flax""": """flax>=0.4.1,<=0.7.0""", """ftfy""": """ftfy""", """fugashi""": """fugashi>=1.0""", """GitPython""": """GitPython<3.1.19""", """hf-doc-builder""": """hf-doc-builder>=0.3.0""", """huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""", """importlib_metadata""": """importlib_metadata""", """ipadic""": """ipadic>=1.0.0,<2.0""", """isort""": """isort>=5.5.4""", """jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""", """jaxlib""": """jaxlib>=0.1.65,<=0.4.13""", """jieba""": """jieba""", """kenlm""": """kenlm""", """keras-nlp""": """keras-nlp>=0.3.1""", """librosa""": """librosa""", """nltk""": """nltk""", """natten""": """natten>=0.14.6""", """numpy""": """numpy>=1.17""", """onnxconverter-common""": """onnxconverter-common""", """onnxruntime-tools""": """onnxruntime-tools>=1.4.2""", """onnxruntime""": """onnxruntime>=1.4.0""", """opencv-python""": """opencv-python""", """optuna""": """optuna""", """optax""": """optax>=0.0.8,<=0.1.4""", """packaging""": """packaging>=20.0""", """parameterized""": """parameterized""", """phonemizer""": """phonemizer""", """protobuf""": """protobuf""", """psutil""": """psutil""", """pyyaml""": """pyyaml>=5.1""", """pydantic""": """pydantic<2""", """pytest""": """pytest>=7.2.0""", """pytest-timeout""": """pytest-timeout""", """pytest-xdist""": """pytest-xdist""", """python""": """python>=3.8.0""", """ray[tune]""": """ray[tune]""", """regex""": """regex!=2019.12.17""", """requests""": """requests""", """rhoknp""": """rhoknp>=1.1.0,<1.3.1""", """rjieba""": """rjieba""", """rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""", """ruff""": """ruff>=0.0.241,<=0.0.259""", """sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""", """sacremoses""": """sacremoses""", """safetensors""": """safetensors>=0.3.1""", """sagemaker""": """sagemaker>=2.31.0""", """scikit-learn""": """scikit-learn""", """sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""", """sigopt""": """sigopt""", """starlette""": """starlette""", """sudachipy""": """sudachipy>=0.6.6""", """sudachidict_core""": """sudachidict_core>=20220729""", """tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""", """tensorflow""": """tensorflow>=2.6,<2.14""", """tensorflow-text""": """tensorflow-text<2.14""", """tf2onnx""": """tf2onnx""", """timeout-decorator""": """timeout-decorator""", """timm""": """timm""", """tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""", """torch""": """torch>=1.9,!=1.12.0""", """torchaudio""": """torchaudio""", """torchvision""": """torchvision""", """pyctcdecode""": """pyctcdecode>=0.4.0""", """tqdm""": """tqdm>=4.27""", """unidic""": """unidic>=1.0.2""", """unidic_lite""": """unidic_lite>=1.0.7""", """urllib3""": """urllib3<2.0.0""", """uvicorn""": """uvicorn""", }
128
import argparse from pathlib import Path import fairseq import torch from fairseq.models.xmod import XMODModel as FairseqXmodModel from packaging import version from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse("""0.12.2"""): raise Exception("""requires fairseq >= 0.12.2""") if version.parse(fairseq.__version__) > version.parse("""2"""): raise Exception("""requires fairseq < v2""") logging.set_verbosity_info() UpperCAmelCase : Optional[int] =logging.get_logger(__name__) UpperCAmelCase : int ="""Hello, World!""" UpperCAmelCase : int ="""en_XX""" def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase): UpperCamelCase_ = Path("data_bin") UpperCamelCase_ = FairseqXmodModel.from_pretrained( model_name_or_path=str(Path(_lowerCAmelCase).parent) , checkpoint_file=Path(_lowerCAmelCase).name , _name="xmod_base" , arch="xmod_base" , task="multilingual_masked_lm" , data_name_or_path=str(_lowerCAmelCase) , bpe="sentencepiece" , sentencepiece_model=str(Path(_lowerCAmelCase).parent / "sentencepiece.bpe.model") , src_dict=str(data_dir / "dict.txt") , ) xmod.eval() # disable dropout print(_lowerCAmelCase) UpperCamelCase_ = xmod.model.encoder.sentence_encoder UpperCamelCase_ = XmodConfig( vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , ) if classification_head: UpperCamelCase_ = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0] print("Our X-MOD config:" , _lowerCAmelCase) UpperCamelCase_ = XmodForSequenceClassification(_lowerCAmelCase) if classification_head else XmodForMaskedLM(_lowerCAmelCase) model.eval() # Now let's copy all the weights. # Embeddings UpperCamelCase_ = xmod_sent_encoder.embed_tokens.weight UpperCamelCase_ = xmod_sent_encoder.embed_positions.weight UpperCamelCase_ = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight) # just zero them out b/c xmod doesn't use them. UpperCamelCase_ = xmod_sent_encoder.layernorm_embedding.weight UpperCamelCase_ = xmod_sent_encoder.layernorm_embedding.bias for i in range(config.num_hidden_layers): # Encoder: start of layer UpperCamelCase_ = model.roberta.encoder.layer[i] UpperCamelCase_ = xmod_sent_encoder.layers[i] # self attention UpperCamelCase_ = layer.attention.self if not ( xmod_layer.self_attn.k_proj.weight.data.shape == xmod_layer.self_attn.q_proj.weight.data.shape == xmod_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size)) ): raise AssertionError("Dimensions of self-attention weights do not match.") UpperCamelCase_ = xmod_layer.self_attn.q_proj.weight UpperCamelCase_ = xmod_layer.self_attn.q_proj.bias UpperCamelCase_ = xmod_layer.self_attn.k_proj.weight UpperCamelCase_ = xmod_layer.self_attn.k_proj.bias UpperCamelCase_ = xmod_layer.self_attn.v_proj.weight UpperCamelCase_ = xmod_layer.self_attn.v_proj.bias # self-attention output UpperCamelCase_ = layer.attention.output if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape: raise AssertionError("Dimensions of self-attention output weights do not match.") UpperCamelCase_ = xmod_layer.self_attn.out_proj.weight UpperCamelCase_ = xmod_layer.self_attn.out_proj.bias UpperCamelCase_ = xmod_layer.self_attn_layer_norm.weight UpperCamelCase_ = xmod_layer.self_attn_layer_norm.bias # intermediate UpperCamelCase_ = layer.intermediate if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError("Dimensions of intermediate weights do not match.") UpperCamelCase_ = xmod_layer.fca.weight UpperCamelCase_ = xmod_layer.fca.bias # output UpperCamelCase_ = layer.output if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError("Dimensions of feed-forward weights do not match.") UpperCamelCase_ = xmod_layer.fca.weight UpperCamelCase_ = xmod_layer.fca.bias UpperCamelCase_ = xmod_layer.final_layer_norm.weight UpperCamelCase_ = xmod_layer.final_layer_norm.bias if bert_output.adapter_layer_norm is not None: UpperCamelCase_ = xmod_layer.adapter_layer_norm.weight UpperCamelCase_ = xmod_layer.adapter_layer_norm.bias if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()): raise AssertionError("Lists of language adapters do not match.") for lang_code, adapter in xmod_layer.adapter_modules.items(): UpperCamelCase_ = bert_output.adapter_modules[lang_code] UpperCamelCase_ = xmod_layer.adapter_modules[lang_code] UpperCamelCase_ = from_adapter.fca.weight UpperCamelCase_ = from_adapter.fca.bias UpperCamelCase_ = from_adapter.fca.weight UpperCamelCase_ = from_adapter.fca.bias # end of layer if xmod_sent_encoder.layer_norm is not None: UpperCamelCase_ = xmod_sent_encoder.layer_norm.weight UpperCamelCase_ = xmod_sent_encoder.layer_norm.bias if classification_head: UpperCamelCase_ = xmod.model.classification_heads["mnli"].dense.weight UpperCamelCase_ = xmod.model.classification_heads["mnli"].dense.bias UpperCamelCase_ = xmod.model.classification_heads["mnli"].out_proj.weight UpperCamelCase_ = xmod.model.classification_heads["mnli"].out_proj.bias else: # LM Head UpperCamelCase_ = xmod.model.encoder.lm_head.dense.weight UpperCamelCase_ = xmod.model.encoder.lm_head.dense.bias UpperCamelCase_ = xmod.model.encoder.lm_head.layer_norm.weight UpperCamelCase_ = xmod.model.encoder.lm_head.layer_norm.bias UpperCamelCase_ = xmod.model.encoder.lm_head.weight UpperCamelCase_ = xmod.model.encoder.lm_head.bias # Let's check that we get the same results. UpperCamelCase_ = xmod.encode(_lowerCAmelCase).unsqueeze(0) # batch of size 1 model.roberta.set_default_language(_lowerCAmelCase) UpperCamelCase_ = model(_lowerCAmelCase)[0] if classification_head: UpperCamelCase_ = xmod.model.classification_heads["mnli"](xmod.extract_features(_lowerCAmelCase)) else: UpperCamelCase_ = xmod.model(_lowerCAmelCase , lang_id=[SAMPLE_LANGUAGE])[0] print(our_output.shape , their_output.shape) UpperCamelCase_ = torch.max(torch.abs(our_output - their_output)).item() print(f"""max_absolute_diff = {max_absolute_diff}""") # ~ 1e-7 UpperCamelCase_ = torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3) print("Do both models output the same tensors?" , "🔥" if success else "💩") if not success: raise Exception("Something went wRoNg") Path(_lowerCAmelCase).mkdir(parents=_lowerCAmelCase , exist_ok=_lowerCAmelCase) print(f"""Saving model to {pytorch_dump_folder_path}""") model.save_pretrained(_lowerCAmelCase) if __name__ == "__main__": UpperCAmelCase : Optional[Any] =argparse.ArgumentParser() # Required parameters parser.add_argument( """--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--classification_head""", action="""store_true""", help="""Whether to convert a final classification head.""" ) UpperCAmelCase : Tuple =parser.parse_args() convert_xmod_checkpoint_to_pytorch( args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
128
1
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) @add_end_docstrings(lowercase__ ) class UpperCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self , **UpperCamelCase_ ): super().__init__(**UpperCamelCase_ ) if self.framework == "tf": raise ValueError(f"The {self.__class__} is only available in PyTorch." ) requires_backends(self , '''vision''' ) self.check_model_type(UpperCamelCase_ ) def __call__( self , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ): if "text_queries" in kwargs: lowercase_ :Tuple = kwargs.pop('''text_queries''' ) if isinstance(UpperCamelCase_ , (str, Image.Image) ): lowercase_ :Union[str, Any] = {'''image''': image, '''candidate_labels''': candidate_labels} else: lowercase_ :List[str] = image lowercase_ :Any = super().__call__(UpperCamelCase_ , **UpperCamelCase_ ) return results def UpperCamelCase ( self , **UpperCamelCase_ ): lowercase_ :Tuple = {} if "threshold" in kwargs: lowercase_ :Union[str, Any] = kwargs['''threshold'''] if "top_k" in kwargs: lowercase_ :Any = kwargs['''top_k'''] return {}, {}, postprocess_params def UpperCamelCase ( self , UpperCamelCase_ ): lowercase_ :str = load_image(inputs['''image'''] ) lowercase_ :int = inputs['''candidate_labels'''] if isinstance(UpperCamelCase_ , UpperCamelCase_ ): lowercase_ :List[Any] = candidate_labels.split(''',''' ) lowercase_ :Any = torch.tensor([[image.height, image.width]] , dtype=torch.intaa ) for i, candidate_label in enumerate(UpperCamelCase_ ): lowercase_ :Tuple = self.tokenizer(UpperCamelCase_ , return_tensors=self.framework ) lowercase_ :List[str] = self.image_processor(UpperCamelCase_ , return_tensors=self.framework ) yield { "is_last": i == len(UpperCamelCase_ ) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def UpperCamelCase ( self , UpperCamelCase_ ): lowercase_ :List[str] = model_inputs.pop('''target_size''' ) lowercase_ :Union[str, Any] = model_inputs.pop('''candidate_label''' ) lowercase_ :Optional[Any] = model_inputs.pop('''is_last''' ) lowercase_ :str = self.model(**UpperCamelCase_ ) lowercase_ :List[str] = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs} return model_outputs def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_=0.1 , UpperCamelCase_=None ): lowercase_ :List[Any] = [] for model_output in model_outputs: lowercase_ :List[str] = model_output['''candidate_label'''] lowercase_ :str = BaseModelOutput(UpperCamelCase_ ) lowercase_ :List[Any] = self.image_processor.post_process_object_detection( outputs=UpperCamelCase_ , threshold=UpperCamelCase_ , target_sizes=model_output['''target_size'''] )[0] for index in outputs["scores"].nonzero(): lowercase_ :List[Any] = outputs['''scores'''][index].item() lowercase_ :List[Any] = self._get_bounding_box(outputs['''boxes'''][index][0] ) lowercase_ :str = {'''score''': score, '''label''': label, '''box''': box} results.append(UpperCamelCase_ ) lowercase_ :Union[str, Any] = sorted(UpperCamelCase_ , key=lambda UpperCamelCase_ : x["score"] , reverse=UpperCamelCase_ ) if top_k: lowercase_ :str = results[:top_k] return results def UpperCamelCase ( self , UpperCamelCase_ ): if self.framework != "pt": raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''' ) lowercase_ :Any = box.int().tolist() lowercase_ :List[Any] = { '''xmin''': xmin, '''ymin''': ymin, '''xmax''': xmax, '''ymax''': ymax, } return bbox
351
from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Any = { "edbeeching/decision-transformer-gym-hopper-medium": ( "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json" ), # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer } class UpperCamelCase ( lowercase__ ): '''simple docstring''' lowercase : Optional[int] ="""decision_transformer""" lowercase : Dict =["""past_key_values"""] lowercase : Any ={ """max_position_embeddings""": """n_positions""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , UpperCamelCase_=17 , UpperCamelCase_=4 , UpperCamelCase_=128 , UpperCamelCase_=4096 , UpperCamelCase_=True , UpperCamelCase_=1 , UpperCamelCase_=1024 , UpperCamelCase_=3 , UpperCamelCase_=1 , UpperCamelCase_=None , UpperCamelCase_="relu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=1E-5 , UpperCamelCase_=0.02 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=5_0256 , UpperCamelCase_=5_0256 , UpperCamelCase_=False , UpperCamelCase_=False , **UpperCamelCase_ , ): lowercase_ :Any = state_dim lowercase_ :List[str] = act_dim lowercase_ :List[str] = hidden_size lowercase_ :int = max_ep_len lowercase_ :List[str] = action_tanh lowercase_ :Any = vocab_size lowercase_ :List[Any] = n_positions lowercase_ :List[str] = n_layer lowercase_ :Optional[Any] = n_head lowercase_ :int = n_inner lowercase_ :List[str] = activation_function lowercase_ :List[str] = resid_pdrop lowercase_ :Dict = embd_pdrop lowercase_ :List[Any] = attn_pdrop lowercase_ :Union[str, Any] = layer_norm_epsilon lowercase_ :List[str] = initializer_range lowercase_ :Any = scale_attn_weights lowercase_ :Union[str, Any] = use_cache lowercase_ :Any = scale_attn_by_inverse_layer_idx lowercase_ :Tuple = reorder_and_upcast_attn lowercase_ :int = bos_token_id lowercase_ :List[str] = eos_token_id super().__init__(bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
252
0
'''simple docstring''' import inspect from typing import Optional, Union import numpy as np import PIL import torch from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import ( PIL_INTERPOLATION, randn_tensor, ) def __snake_case ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int ): if isinstance(UpperCAmelCase_ , torch.Tensor ): return image elif isinstance(UpperCAmelCase_ , PIL.Image.Image ): lowerCamelCase_ = [image] if isinstance(image[0] , PIL.Image.Image ): lowerCamelCase_ = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image] lowerCamelCase_ = np.concatenate(UpperCAmelCase_ , axis=0 ) lowerCamelCase_ = np.array(UpperCAmelCase_ ).astype(np.floataa ) / 255.0 lowerCamelCase_ = image.transpose(0 , 3 , 1 , 2 ) lowerCamelCase_ = 2.0 * image - 1.0 lowerCamelCase_ = torch.from_numpy(UpperCAmelCase_ ) elif isinstance(image[0] , torch.Tensor ): lowerCamelCase_ = torch.cat(UpperCAmelCase_ , dim=0 ) return image def __snake_case ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any=0.9995 ): if not isinstance(UpperCAmelCase_ , np.ndarray ): lowerCamelCase_ = True lowerCamelCase_ = va.device lowerCamelCase_ = va.cpu().numpy() lowerCamelCase_ = va.cpu().numpy() lowerCamelCase_ = np.sum(va * va / (np.linalg.norm(UpperCAmelCase_ ) * np.linalg.norm(UpperCAmelCase_ )) ) if np.abs(UpperCAmelCase_ ) > DOT_THRESHOLD: lowerCamelCase_ = (1 - t) * va + t * va else: lowerCamelCase_ = np.arccos(UpperCAmelCase_ ) lowerCamelCase_ = np.sin(UpperCAmelCase_ ) lowerCamelCase_ = theta_a * t lowerCamelCase_ = np.sin(UpperCAmelCase_ ) lowerCamelCase_ = np.sin(theta_a - theta_t ) / sin_theta_a lowerCamelCase_ = sin_theta_t / sin_theta_a lowerCamelCase_ = sa * va + sa * va if inputs_are_torch: lowerCamelCase_ = torch.from_numpy(UpperCAmelCase_ ).to(UpperCAmelCase_ ) return va def __snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] ): lowerCamelCase_ = F.normalize(UpperCAmelCase_ , dim=-1 ) lowerCamelCase_ = F.normalize(UpperCAmelCase_ , dim=-1 ) return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 ) def __snake_case ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] ): for param in model.parameters(): lowerCamelCase_ = value class snake_case ( lowercase ): """simple docstring""" def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , ): """simple docstring""" super().__init__() self.register_modules( vae=UpperCamelCase , text_encoder=UpperCamelCase , clip_model=UpperCamelCase , tokenizer=UpperCamelCase , unet=UpperCamelCase , scheduler=UpperCamelCase , feature_extractor=UpperCamelCase , coca_model=UpperCamelCase , coca_tokenizer=UpperCamelCase , coca_transform=UpperCamelCase , ) lowerCamelCase_ = ( feature_extractor.size if isinstance(feature_extractor.size , UpperCamelCase ) else feature_extractor.size["shortest_edge"] ) lowerCamelCase_ = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std ) set_requires_grad(self.text_encoder , UpperCamelCase ) set_requires_grad(self.clip_model , UpperCamelCase ) def snake_case ( self , UpperCamelCase = "auto" ): """simple docstring""" if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory lowerCamelCase_ = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(UpperCamelCase ) def snake_case ( self ): """simple docstring""" self.enable_attention_slicing(UpperCamelCase ) def snake_case ( self ): """simple docstring""" set_requires_grad(self.vae , UpperCamelCase ) def snake_case ( self ): """simple docstring""" set_requires_grad(self.vae , UpperCamelCase ) def snake_case ( self ): """simple docstring""" set_requires_grad(self.unet , UpperCamelCase ) def snake_case ( self ): """simple docstring""" set_requires_grad(self.unet , UpperCamelCase ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" # get the original timestep using init_timestep lowerCamelCase_ = min(int(num_inference_steps * strength ) , UpperCamelCase ) lowerCamelCase_ = max(num_inference_steps - init_timestep , 0 ) lowerCamelCase_ = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=None ): """simple docstring""" if not isinstance(UpperCamelCase , torch.Tensor ): raise ValueError(f'''`image` has to be of type `torch.Tensor` but is {type(UpperCamelCase )}''' ) lowerCamelCase_ = image.to(device=UpperCamelCase , dtype=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ): lowerCamelCase_ = [ self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(UpperCamelCase ) ] lowerCamelCase_ = torch.cat(UpperCamelCase , dim=0 ) else: lowerCamelCase_ = self.vae.encode(UpperCamelCase ).latent_dist.sample(UpperCamelCase ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor lowerCamelCase_ = 0.18_215 * init_latents lowerCamelCase_ = init_latents.repeat_interleave(UpperCamelCase , dim=0 ) lowerCamelCase_ = randn_tensor(init_latents.shape , generator=UpperCamelCase , device=UpperCamelCase , dtype=UpperCamelCase ) # get latents lowerCamelCase_ = self.scheduler.add_noise(UpperCamelCase , UpperCamelCase , UpperCamelCase ) lowerCamelCase_ = init_latents return latents def snake_case ( self , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = self.coca_transform(UpperCamelCase ).unsqueeze(0 ) with torch.no_grad(), torch.cuda.amp.autocast(): lowerCamelCase_ = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) ) lowerCamelCase_ = self.coca_tokenizer.decode(generated[0].cpu().numpy() ) return generated.split("<end_of_text>" )[0].replace("<start_of_text>" , "" ).rstrip(" .," ) def snake_case ( self , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = self.feature_extractor.preprocess(UpperCamelCase ) lowerCamelCase_ = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half() lowerCamelCase_ = self.clip_model.get_image_features(UpperCamelCase ) lowerCamelCase_ = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=UpperCamelCase ) lowerCamelCase_ = image_embeddings_clip.repeat_interleave(UpperCamelCase , dim=0 ) return image_embeddings_clip @torch.enable_grad() def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ): """simple docstring""" lowerCamelCase_ = latents.detach().requires_grad_() lowerCamelCase_ = self.scheduler.scale_model_input(UpperCamelCase , UpperCamelCase ) # predict the noise residual lowerCamelCase_ = self.unet(UpperCamelCase , UpperCamelCase , encoder_hidden_states=UpperCamelCase ).sample if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ): lowerCamelCase_ = self.scheduler.alphas_cumprod[timestep] lowerCamelCase_ = 1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf lowerCamelCase_ = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 lowerCamelCase_ = torch.sqrt(UpperCamelCase ) lowerCamelCase_ = pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler , UpperCamelCase ): lowerCamelCase_ = self.scheduler.sigmas[index] lowerCamelCase_ = latents - sigma * noise_pred else: raise ValueError(f'''scheduler type {type(self.scheduler )} not supported''' ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor lowerCamelCase_ = 1 / 0.18_215 * sample lowerCamelCase_ = self.vae.decode(UpperCamelCase ).sample lowerCamelCase_ = (image / 2 + 0.5).clamp(0 , 1 ) lowerCamelCase_ = transforms.Resize(self.feature_extractor_size )(UpperCamelCase ) lowerCamelCase_ = self.normalize(UpperCamelCase ).to(latents.dtype ) lowerCamelCase_ = self.clip_model.get_image_features(UpperCamelCase ) lowerCamelCase_ = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=UpperCamelCase ) lowerCamelCase_ = spherical_dist_loss(UpperCamelCase , UpperCamelCase ).mean() * clip_guidance_scale lowerCamelCase_ = -torch.autograd.grad(UpperCamelCase , UpperCamelCase )[0] if isinstance(self.scheduler , UpperCamelCase ): lowerCamelCase_ = latents.detach() + grads * (sigma**2) lowerCamelCase_ = noise_pred_original else: lowerCamelCase_ = noise_pred_original - torch.sqrt(UpperCamelCase ) * grads return noise_pred, latents @torch.no_grad() def __call__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = 512 , UpperCamelCase = 512 , UpperCamelCase = 0.6 , UpperCamelCase = 50 , UpperCamelCase = 7.5 , UpperCamelCase = 1 , UpperCamelCase = 0.0 , UpperCamelCase = 100 , UpperCamelCase = None , UpperCamelCase = "pil" , UpperCamelCase = True , UpperCamelCase = 0.8 , UpperCamelCase = 0.1 , UpperCamelCase = 0.1 , ): """simple docstring""" if isinstance(UpperCamelCase , UpperCamelCase ) and len(UpperCamelCase ) != batch_size: raise ValueError(f'''You have passed {batch_size} batch_size, but only {len(UpperCamelCase )} generators.''' ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' ) if isinstance(UpperCamelCase , torch.Generator ) and batch_size > 1: lowerCamelCase_ = [generator] + [None] * (batch_size - 1) lowerCamelCase_ = [ ("model", self.coca_model is None), ("tokenizer", self.coca_tokenizer is None), ("transform", self.coca_transform is None), ] lowerCamelCase_ = [x[0] for x in coca_is_none if x[1]] lowerCamelCase_ = ", ".join(UpperCamelCase ) # generate prompts with coca model if prompt is None if content_prompt is None: if len(UpperCamelCase ): raise ValueError( f'''Content prompt is None and CoCa [{coca_is_none_str}] is None.''' f'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' ) lowerCamelCase_ = self.get_image_description(UpperCamelCase ) if style_prompt is None: if len(UpperCamelCase ): raise ValueError( f'''Style prompt is None and CoCa [{coca_is_none_str}] is None.''' f''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' ) lowerCamelCase_ = self.get_image_description(UpperCamelCase ) # get prompt text embeddings for content and style lowerCamelCase_ = self.tokenizer( UpperCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=UpperCamelCase , return_tensors="pt" , ) lowerCamelCase_ = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0] lowerCamelCase_ = self.tokenizer( UpperCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=UpperCamelCase , return_tensors="pt" , ) lowerCamelCase_ = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0] lowerCamelCase_ = slerp(UpperCamelCase , UpperCamelCase , UpperCamelCase ) # duplicate text embeddings for each generation per prompt lowerCamelCase_ = text_embeddings.repeat_interleave(UpperCamelCase , dim=0 ) # set timesteps lowerCamelCase_ = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() ) lowerCamelCase_ = {} if accepts_offset: lowerCamelCase_ = 1 self.scheduler.set_timesteps(UpperCamelCase , **UpperCamelCase ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device ) lowerCamelCase_ ,lowerCamelCase_ = self.get_timesteps(UpperCamelCase , UpperCamelCase , self.device ) lowerCamelCase_ = timesteps[:1].repeat(UpperCamelCase ) # Preprocess image lowerCamelCase_ = preprocess(UpperCamelCase , UpperCamelCase , UpperCamelCase ) lowerCamelCase_ = self.prepare_latents( UpperCamelCase , UpperCamelCase , UpperCamelCase , text_embeddings.dtype , self.device , UpperCamelCase ) lowerCamelCase_ = preprocess(UpperCamelCase , UpperCamelCase , UpperCamelCase ) lowerCamelCase_ = self.prepare_latents( UpperCamelCase , UpperCamelCase , UpperCamelCase , text_embeddings.dtype , self.device , UpperCamelCase ) lowerCamelCase_ = slerp(UpperCamelCase , UpperCamelCase , UpperCamelCase ) if clip_guidance_scale > 0: lowerCamelCase_ = self.get_clip_image_embeddings(UpperCamelCase , UpperCamelCase ) lowerCamelCase_ = self.get_clip_image_embeddings(UpperCamelCase , UpperCamelCase ) lowerCamelCase_ = slerp( UpperCamelCase , UpperCamelCase , UpperCamelCase ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. lowerCamelCase_ = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: lowerCamelCase_ = content_text_input.input_ids.shape[-1] lowerCamelCase_ = self.tokenizer([""] , padding="max_length" , max_length=UpperCamelCase , return_tensors="pt" ) lowerCamelCase_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt lowerCamelCase_ = uncond_embeddings.repeat_interleave(UpperCamelCase , dim=0 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes lowerCamelCase_ = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. lowerCamelCase_ = (batch_size, self.unet.config.in_channels, height // 8, width // 8) lowerCamelCase_ = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps lowerCamelCase_ = torch.randn(UpperCamelCase , generator=UpperCamelCase , device="cpu" , dtype=UpperCamelCase ).to( self.device ) else: lowerCamelCase_ = torch.randn(UpperCamelCase , generator=UpperCamelCase , device=self.device , dtype=UpperCamelCase ) else: if latents.shape != latents_shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) lowerCamelCase_ = latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler lowerCamelCase_ = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] lowerCamelCase_ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) lowerCamelCase_ = {} if accepts_eta: lowerCamelCase_ = eta # check if the scheduler accepts generator lowerCamelCase_ = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) if accepts_generator: lowerCamelCase_ = generator with self.progress_bar(total=UpperCamelCase ): for i, t in enumerate(UpperCamelCase ): # expand the latents if we are doing classifier free guidance lowerCamelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents lowerCamelCase_ = self.scheduler.scale_model_input(UpperCamelCase , UpperCamelCase ) # predict the noise residual lowerCamelCase_ = self.unet(UpperCamelCase , UpperCamelCase , encoder_hidden_states=UpperCamelCase ).sample # perform classifier free guidance if do_classifier_free_guidance: lowerCamelCase_ ,lowerCamelCase_ = noise_pred.chunk(2 ) lowerCamelCase_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: lowerCamelCase_ = ( text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings ) lowerCamelCase_ ,lowerCamelCase_ = self.cond_fn( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ) # compute the previous noisy sample x_t -> x_t-1 lowerCamelCase_ = self.scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ).prev_sample # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor lowerCamelCase_ = 1 / 0.18_215 * latents lowerCamelCase_ = self.vae.decode(UpperCamelCase ).sample lowerCamelCase_ = (image / 2 + 0.5).clamp(0 , 1 ) lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowerCamelCase_ = self.numpy_to_pil(UpperCamelCase ) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=UpperCamelCase , nsfw_content_detected=UpperCamelCase )
55
import unittest from transformers import BigBirdTokenizer, BigBirdTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCamelCase = '▁' lowerCamelCase = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class A ( UpperCamelCase_ , unittest.TestCase ): UpperCamelCase__ : Tuple =BigBirdTokenizer UpperCamelCase__ : Union[str, Any] =BigBirdTokenizerFast UpperCamelCase__ : Any =True UpperCamelCase__ : Optional[Any] =True def lowerCamelCase ( self : List[Any] ) -> Dict: """simple docstring""" super().setUp() _lowerCamelCase : List[Any] =self.tokenizer_class(lowercase_ , keep_accents=lowercase_ ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase ( self : Union[str, Any] ) -> int: """simple docstring""" _lowerCamelCase : List[Any] ='<s>' _lowerCamelCase : Optional[Any] =1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ ) def lowerCamelCase ( self : int ) -> Union[str, Any]: """simple docstring""" _lowerCamelCase : Optional[int] =list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<unk>' ) self.assertEqual(vocab_keys[1] , '<s>' ) self.assertEqual(vocab_keys[-1] , '[MASK]' ) self.assertEqual(len(lowercase_ ) , 1004 ) def lowerCamelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1000 ) def lowerCamelCase ( self : Any ) -> Dict: """simple docstring""" if not self.test_rust_tokenizer: return _lowerCamelCase : Union[str, Any] =self.get_tokenizer() _lowerCamelCase : int =self.get_rust_tokenizer() _lowerCamelCase : int ='I was born in 92000, and this is falsé.' _lowerCamelCase : int =tokenizer.tokenize(lowercase_ ) _lowerCamelCase : List[Any] =rust_tokenizer.tokenize(lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) _lowerCamelCase : Any =tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ ) _lowerCamelCase : str =rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) _lowerCamelCase : str =self.get_rust_tokenizer() _lowerCamelCase : Union[str, Any] =tokenizer.encode(lowercase_ ) _lowerCamelCase : List[Any] =rust_tokenizer.encode(lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) def lowerCamelCase ( self : Dict ) -> Union[str, Any]: """simple docstring""" _lowerCamelCase : str =BigBirdTokenizer(lowercase_ , keep_accents=lowercase_ ) _lowerCamelCase : int =tokenizer.tokenize('This is a test' ) self.assertListEqual(lowercase_ , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowercase_ ) , [285, 46, 10, 170, 382] , ) _lowerCamelCase : Optional[Any] =tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( lowercase_ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) _lowerCamelCase : Any =tokenizer.convert_tokens_to_ids(lowercase_ ) self.assertListEqual( lowercase_ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) _lowerCamelCase : Optional[int] =tokenizer.convert_ids_to_tokens(lowercase_ ) self.assertListEqual( lowercase_ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) @cached_property def lowerCamelCase ( self : Union[str, Any] ) -> str: """simple docstring""" return BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base' ) @slow def lowerCamelCase ( self : Any ) -> Dict: """simple docstring""" _lowerCamelCase : List[str] ='Hello World!' _lowerCamelCase : Tuple =[65, 1_8536, 2260, 101, 66] self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) ) @slow def lowerCamelCase ( self : str ) -> Optional[Any]: """simple docstring""" _lowerCamelCase : int =( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will' ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth' ) # fmt: off _lowerCamelCase : Tuple =[65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 3_4324, 497, 391, 408, 1_1342, 1244, 385, 100, 938, 985, 456, 574, 362, 1_2597, 3200, 3129, 1172, 66] # noqa: E231 # fmt: on self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) ) @require_torch @slow def lowerCamelCase ( self : Any ) -> Any: """simple docstring""" import torch from transformers import BigBirdConfig, BigBirdModel # Build sequence _lowerCamelCase : Union[str, Any] =list(self.big_tokenizer.get_vocab().keys() )[:10] _lowerCamelCase : List[Any] =' '.join(lowercase_ ) _lowerCamelCase : List[str] =self.big_tokenizer.encode_plus(lowercase_ , return_tensors='pt' , return_token_type_ids=lowercase_ ) _lowerCamelCase : Optional[int] =self.big_tokenizer.batch_encode_plus( [sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=lowercase_ ) _lowerCamelCase : List[str] =BigBirdConfig(attention_type='original_full' ) _lowerCamelCase : Optional[Any] =BigBirdModel(lowercase_ ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**lowercase_ ) model(**lowercase_ ) @slow def lowerCamelCase ( self : Dict ) -> Optional[int]: """simple docstring""" _lowerCamelCase : Dict =BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base' ) _lowerCamelCase : int =tokenizer.decode(tokenizer('Paris is the [MASK].' ).input_ids ) self.assertTrue(decoded_text == '[CLS] Paris is the[MASK].[SEP]' ) @slow def lowerCamelCase ( self : Optional[int] ) -> List[Any]: """simple docstring""" _lowerCamelCase : Union[str, Any] ={'input_ids': [[65, 3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114, 66], [65, 448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase_ , model_name='google/bigbird-roberta-base' , revision='215c99f1600e06f83acce68422f2035b2b5c3510' , )
199
0
from importlib import import_module from .logging import get_logger lowerCamelCase_ : Optional[int] = get_logger(__name__) class a__ : def __init__( self , UpperCAmelCase , UpperCAmelCase=None ) -> Dict: __a = attrs or [] if module is not None: for key in module.__dict__: if key in attrs or not key.startswith('__' ): setattr(self , UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) ) __a = module._original_module if isinstance(UpperCAmelCase , _PatchedModuleObj ) else module class a__ : A__ : str = [] def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None ) -> int: __a = obj __a = target __a = new __a = target.split('.' )[0] __a = {} __a = attrs or [] def __enter__( self ) -> List[str]: *__a , __a = self.target.split('.' ) # Patch modules: # it's used to patch attributes of submodules like "os.path.join"; # in this case we need to patch "os" and "os.path" for i in range(len(UpperCAmelCase ) ): try: __a = import_module('.'.join(submodules[: i + 1] ) ) except ModuleNotFoundError: continue # We iterate over all the globals in self.obj in case we find "os" or "os.path" for attr in self.obj.__dir__(): __a = getattr(self.obj , UpperCAmelCase ) # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". # This allows to patch renamed modules like "from os import path as ospath". if obj_attr is submodule or ( (isinstance(UpperCAmelCase , _PatchedModuleObj ) and obj_attr._original_module is submodule) ): __a = obj_attr # patch at top level setattr(self.obj , UpperCAmelCase , _PatchedModuleObj(UpperCAmelCase , attrs=self.attrs ) ) __a = getattr(self.obj , UpperCAmelCase ) # construct lower levels patches for key in submodules[i + 1 :]: setattr(UpperCAmelCase , UpperCAmelCase , _PatchedModuleObj(getattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , attrs=self.attrs ) ) __a = getattr(UpperCAmelCase , UpperCAmelCase ) # finally set the target attribute setattr(UpperCAmelCase , UpperCAmelCase , self.new ) # Patch attribute itself: # it's used for builtins like "open", # and also to patch "os.path.join" we may also need to patch "join" # itself if it was imported as "from os.path import join". if submodules: # if it's an attribute of a submodule like "os.path.join" try: __a = getattr(import_module('.'.join(UpperCAmelCase ) ) , UpperCAmelCase ) except (AttributeError, ModuleNotFoundError): return # We iterate over all the globals in self.obj in case we find "os.path.join" for attr in self.obj.__dir__(): # We don't check for the name of the global, but rather if its value *is* "os.path.join". # This allows to patch renamed attributes like "from os.path import join as pjoin". if getattr(self.obj , UpperCAmelCase ) is attr_value: __a = getattr(self.obj , UpperCAmelCase ) setattr(self.obj , UpperCAmelCase , self.new ) elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open" __a = globals()['__builtins__'][target_attr] setattr(self.obj , UpperCAmelCase , self.new ) else: raise RuntimeError(f'''Tried to patch attribute {target_attr} instead of a submodule.''' ) def __exit__( self , *UpperCAmelCase ) -> Optional[int]: for attr in list(self.original ): setattr(self.obj , UpperCAmelCase , self.original.pop(UpperCAmelCase ) ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: self.__enter__() self._active_patches.append(self ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: try: self._active_patches.remove(self ) except ValueError: # If the patch hasn't been started this will fail return None return self.__exit__()
197
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase_ : Optional[int] = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ : str = ["""XLNetTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ : Optional[Any] = ["""XLNetTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ : Union[str, Any] = [ """XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """XLNetForMultipleChoice""", """XLNetForQuestionAnswering""", """XLNetForQuestionAnsweringSimple""", """XLNetForSequenceClassification""", """XLNetForTokenClassification""", """XLNetLMHeadModel""", """XLNetModel""", """XLNetPreTrainedModel""", """load_tf_weights_in_xlnet""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ : Optional[int] = [ """TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFXLNetForMultipleChoice""", """TFXLNetForQuestionAnsweringSimple""", """TFXLNetForSequenceClassification""", """TFXLNetForTokenClassification""", """TFXLNetLMHeadModel""", """TFXLNetMainLayer""", """TFXLNetModel""", """TFXLNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet import XLNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet_fast import XLNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlnet import ( XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, XLNetPreTrainedModel, load_tf_weights_in_xlnet, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlnet import ( TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetLMHeadModel, TFXLNetMainLayer, TFXLNetModel, TFXLNetPreTrainedModel, ) else: import sys lowerCamelCase_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
197
1
'''simple docstring''' from __future__ import annotations import math def lowerCamelCase ( UpperCAmelCase__ : int ) -> bool: if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(UpperCAmelCase__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True _lowercase : Tuple = [num for num in range(3, 100001, 2) if not is_prime(num)] def lowerCamelCase ( UpperCAmelCase__ : int ) -> list[int]: if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): raise ValueError("""n must be an integer""" ) if n <= 0: raise ValueError("""n must be >= 0""" ) lowercase_ : List[Any] = [] for num in range(len(UpperCAmelCase__ ) ): lowercase_ : int = 0 while 2 * i * i <= odd_composites[num]: lowercase_ : Union[str, Any] = odd_composites[num] - 2 * i * i if is_prime(UpperCAmelCase__ ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(UpperCAmelCase__ ) == n: return list_nums return [] def lowerCamelCase ( ) -> int: return compute_nums(1 )[0] if __name__ == "__main__": print(f"""{solution() = }""")
239
'''simple docstring''' import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets _lowercase : Optional[int] = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n" _lowercase : List[Any] = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n" _lowercase : List[Any] = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) class __magic_name__ ( datasets.Metric): def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/krishnap25/mauve""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , codebase_urls=["""https://github.com/krishnap25/mauve"""] , reference_urls=[ """https://arxiv.org/abs/2102.01454""", """https://github.com/krishnap25/mauve""", ] , ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Any , lowercase_ : Any=None , lowercase_ : str=None , lowercase_ : Dict=None , lowercase_ : Any=None , lowercase_ : int="auto" , lowercase_ : Tuple=-1 , lowercase_ : str=0.9 , lowercase_ : Union[str, Any]=5 , lowercase_ : List[str]=500 , lowercase_ : Union[str, Any]="gpt2-large" , lowercase_ : List[Any]=-1 , lowercase_ : str=1024 , lowercase_ : List[str]=25 , lowercase_ : str=5 , lowercase_ : List[Any]=True , lowercase_ : Tuple=25 , ): lowercase_ : List[str] = compute_mauve( p_text=lowercase_ , q_text=lowercase_ , p_features=lowercase_ , q_features=lowercase_ , p_tokens=lowercase_ , q_tokens=lowercase_ , num_buckets=lowercase_ , pca_max_data=lowercase_ , kmeans_explained_var=lowercase_ , kmeans_num_redo=lowercase_ , kmeans_max_iter=lowercase_ , featurize_model_name=lowercase_ , device_id=lowercase_ , max_text_length=lowercase_ , divergence_curve_discretization_size=lowercase_ , mauve_scaling_factor=lowercase_ , verbose=lowercase_ , seed=lowercase_ , ) return out
239
1
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: _UpperCamelCase : int = { "task_specific_params": { "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4}, "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4}, "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6}, } } _UpperCamelCase : int = { "task_specific_params.summarization.length_penalty": 1.0, "task_specific_params.summarization.max_length": 128, "task_specific_params.summarization.min_length": 12, "task_specific_params.summarization.num_beams": 4, "task_specific_params.summarization_cnn.length_penalty": 2.0, "task_specific_params.summarization_cnn.max_length": 142, "task_specific_params.summarization_cnn.min_length": 56, "task_specific_params.summarization_cnn.num_beams": 4, "task_specific_params.summarization_xsum.length_penalty": 1.0, "task_specific_params.summarization_xsum.max_length": 62, "task_specific_params.summarization_xsum.min_length": 11, "task_specific_params.summarization_xsum.num_beams": 6, } self.assertEqual(flatten_dict(_lowerCamelCase ) , _lowerCamelCase ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]: _UpperCamelCase : str = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(transpose(_lowerCamelCase ) , x.transpose() ) ) _UpperCamelCase : List[str] = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(transpose(_lowerCamelCase , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) ) @require_torch def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: _UpperCamelCase : int = np.random.randn(3 , 4 ) _UpperCamelCase : Optional[Any] = torch.tensor(_lowerCamelCase ) self.assertTrue(np.allclose(transpose(_lowerCamelCase ) , transpose(_lowerCamelCase ).numpy() ) ) _UpperCamelCase : int = np.random.randn(3 , 4 , 5 ) _UpperCamelCase : Any = torch.tensor(_lowerCamelCase ) self.assertTrue(np.allclose(transpose(_lowerCamelCase , axes=(1, 2, 0) ) , transpose(_lowerCamelCase , axes=(1, 2, 0) ).numpy() ) ) @require_tf def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]: _UpperCamelCase : Optional[int] = np.random.randn(3 , 4 ) _UpperCamelCase : Optional[Any] = tf.constant(_lowerCamelCase ) self.assertTrue(np.allclose(transpose(_lowerCamelCase ) , transpose(_lowerCamelCase ).numpy() ) ) _UpperCamelCase : Optional[int] = np.random.randn(3 , 4 , 5 ) _UpperCamelCase : Tuple = tf.constant(_lowerCamelCase ) self.assertTrue(np.allclose(transpose(_lowerCamelCase , axes=(1, 2, 0) ) , transpose(_lowerCamelCase , axes=(1, 2, 0) ).numpy() ) ) @require_flax def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]: _UpperCamelCase : List[str] = np.random.randn(3 , 4 ) _UpperCamelCase : Union[str, Any] = jnp.array(_lowerCamelCase ) self.assertTrue(np.allclose(transpose(_lowerCamelCase ) , np.asarray(transpose(_lowerCamelCase ) ) ) ) _UpperCamelCase : Union[str, Any] = np.random.randn(3 , 4 , 5 ) _UpperCamelCase : Optional[int] = jnp.array(_lowerCamelCase ) self.assertTrue(np.allclose(transpose(_lowerCamelCase , axes=(1, 2, 0) ) , np.asarray(transpose(_lowerCamelCase , axes=(1, 2, 0) ) ) ) ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: _UpperCamelCase : Dict = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(reshape(_lowerCamelCase , (4, 3) ) , np.reshape(_lowerCamelCase , (4, 3) ) ) ) _UpperCamelCase : str = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(reshape(_lowerCamelCase , (12, 5) ) , np.reshape(_lowerCamelCase , (12, 5) ) ) ) @require_torch def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict: _UpperCamelCase : Tuple = np.random.randn(3 , 4 ) _UpperCamelCase : Optional[Any] = torch.tensor(_lowerCamelCase ) self.assertTrue(np.allclose(reshape(_lowerCamelCase , (4, 3) ) , reshape(_lowerCamelCase , (4, 3) ).numpy() ) ) _UpperCamelCase : List[str] = np.random.randn(3 , 4 , 5 ) _UpperCamelCase : Tuple = torch.tensor(_lowerCamelCase ) self.assertTrue(np.allclose(reshape(_lowerCamelCase , (12, 5) ) , reshape(_lowerCamelCase , (12, 5) ).numpy() ) ) @require_tf def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: _UpperCamelCase : str = np.random.randn(3 , 4 ) _UpperCamelCase : Optional[Any] = tf.constant(_lowerCamelCase ) self.assertTrue(np.allclose(reshape(_lowerCamelCase , (4, 3) ) , reshape(_lowerCamelCase , (4, 3) ).numpy() ) ) _UpperCamelCase : str = np.random.randn(3 , 4 , 5 ) _UpperCamelCase : Union[str, Any] = tf.constant(_lowerCamelCase ) self.assertTrue(np.allclose(reshape(_lowerCamelCase , (12, 5) ) , reshape(_lowerCamelCase , (12, 5) ).numpy() ) ) @require_flax def __SCREAMING_SNAKE_CASE ( self : str ) -> str: _UpperCamelCase : Optional[int] = np.random.randn(3 , 4 ) _UpperCamelCase : Union[str, Any] = jnp.array(_lowerCamelCase ) self.assertTrue(np.allclose(reshape(_lowerCamelCase , (4, 3) ) , np.asarray(reshape(_lowerCamelCase , (4, 3) ) ) ) ) _UpperCamelCase : Union[str, Any] = np.random.randn(3 , 4 , 5 ) _UpperCamelCase : List[str] = jnp.array(_lowerCamelCase ) self.assertTrue(np.allclose(reshape(_lowerCamelCase , (12, 5) ) , np.asarray(reshape(_lowerCamelCase , (12, 5) ) ) ) ) def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: _UpperCamelCase : str = np.random.randn(1 , 3 , 4 ) self.assertTrue(np.allclose(squeeze(_lowerCamelCase ) , np.squeeze(_lowerCamelCase ) ) ) _UpperCamelCase : List[Any] = np.random.randn(1 , 4 , 1 , 5 ) self.assertTrue(np.allclose(squeeze(_lowerCamelCase , axis=2 ) , np.squeeze(_lowerCamelCase , axis=2 ) ) ) @require_torch def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]: _UpperCamelCase : Optional[Any] = np.random.randn(1 , 3 , 4 ) _UpperCamelCase : Optional[Any] = torch.tensor(_lowerCamelCase ) self.assertTrue(np.allclose(squeeze(_lowerCamelCase ) , squeeze(_lowerCamelCase ).numpy() ) ) _UpperCamelCase : Dict = np.random.randn(1 , 4 , 1 , 5 ) _UpperCamelCase : str = torch.tensor(_lowerCamelCase ) self.assertTrue(np.allclose(squeeze(_lowerCamelCase , axis=2 ) , squeeze(_lowerCamelCase , axis=2 ).numpy() ) ) @require_tf def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]: _UpperCamelCase : str = np.random.randn(1 , 3 , 4 ) _UpperCamelCase : int = tf.constant(_lowerCamelCase ) self.assertTrue(np.allclose(squeeze(_lowerCamelCase ) , squeeze(_lowerCamelCase ).numpy() ) ) _UpperCamelCase : List[Any] = np.random.randn(1 , 4 , 1 , 5 ) _UpperCamelCase : int = tf.constant(_lowerCamelCase ) self.assertTrue(np.allclose(squeeze(_lowerCamelCase , axis=2 ) , squeeze(_lowerCamelCase , axis=2 ).numpy() ) ) @require_flax def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: _UpperCamelCase : Optional[Any] = np.random.randn(1 , 3 , 4 ) _UpperCamelCase : str = jnp.array(_lowerCamelCase ) self.assertTrue(np.allclose(squeeze(_lowerCamelCase ) , np.asarray(squeeze(_lowerCamelCase ) ) ) ) _UpperCamelCase : List[Any] = np.random.randn(1 , 4 , 1 , 5 ) _UpperCamelCase : Dict = jnp.array(_lowerCamelCase ) self.assertTrue(np.allclose(squeeze(_lowerCamelCase , axis=2 ) , np.asarray(squeeze(_lowerCamelCase , axis=2 ) ) ) ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: _UpperCamelCase : Tuple = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(expand_dims(_lowerCamelCase , axis=1 ) , np.expand_dims(_lowerCamelCase , axis=1 ) ) ) @require_torch def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: _UpperCamelCase : Optional[Any] = np.random.randn(3 , 4 ) _UpperCamelCase : str = torch.tensor(_lowerCamelCase ) self.assertTrue(np.allclose(expand_dims(_lowerCamelCase , axis=1 ) , expand_dims(_lowerCamelCase , axis=1 ).numpy() ) ) @require_tf def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict: _UpperCamelCase : List[Any] = np.random.randn(3 , 4 ) _UpperCamelCase : Dict = tf.constant(_lowerCamelCase ) self.assertTrue(np.allclose(expand_dims(_lowerCamelCase , axis=1 ) , expand_dims(_lowerCamelCase , axis=1 ).numpy() ) ) @require_flax def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict: _UpperCamelCase : Optional[Any] = np.random.randn(3 , 4 ) _UpperCamelCase : Optional[Any] = jnp.array(_lowerCamelCase ) self.assertTrue(np.allclose(expand_dims(_lowerCamelCase , axis=1 ) , np.asarray(expand_dims(_lowerCamelCase , axis=1 ) ) ) )
368
"""simple docstring""" import copy import fnmatch import json import os import pickle as pkl import shutil import sys import tarfile import tempfile from collections import OrderedDict from contextlib import contextmanager from functools import partial from hashlib import shaaaa from io import BytesIO from pathlib import Path from urllib.parse import urlparse from zipfile import ZipFile, is_zipfile import cva import numpy as np import requests import wget from filelock import FileLock from PIL import Image from tqdm.auto import tqdm from yaml import Loader, dump, load try: import torch lowerCamelCase__ = True except ImportError: lowerCamelCase__ = False try: from torch.hub import _get_torch_home lowerCamelCase__ = _get_torch_home() except ImportError: lowerCamelCase__ = os.path.expanduser( os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) ) lowerCamelCase__ = os.path.join(torch_cache_home, "transformers") lowerCamelCase__ = "https://cdn.huggingface.co" lowerCamelCase__ = "https://s3.amazonaws.com/models.huggingface.co/bert" lowerCamelCase__ = "/".join(str(Path(__file__).resolve()).split("/")[:-1]) lowerCamelCase__ = os.path.join(PATH, "config.yaml") lowerCamelCase__ = os.path.join(PATH, "attributes.txt") lowerCamelCase__ = os.path.join(PATH, "objects.txt") lowerCamelCase__ = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path) lowerCamelCase__ = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE) lowerCamelCase__ = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE) lowerCamelCase__ = "pytorch_model.bin" lowerCamelCase__ = "config.yaml" def lowercase__ ( lowercase_=OBJECTS ,lowercase_=ATTRIBUTES ) -> Union[str, Any]: """simple docstring""" _UpperCamelCase : str = [] with open(lowercase_ ) as f: for object in f.readlines(): vg_classes.append(object.split("," )[0].lower().strip() ) _UpperCamelCase : Any = [] with open(lowercase_ ) as f: for object in f.readlines(): vg_attrs.append(object.split("," )[0].lower().strip() ) return vg_classes, vg_attrs def lowercase__ ( lowercase_ ) -> Optional[Any]: """simple docstring""" _UpperCamelCase : List[str] = OrderedDict() with open(lowercase_ ,"rb" ) as f: _UpperCamelCase : List[str] = pkl.load(lowercase_ )["model"] for k in copy.deepcopy(list(ckp.keys() ) ): _UpperCamelCase : List[str] = ckp.pop(lowercase_ ) if isinstance(lowercase_ ,np.ndarray ): _UpperCamelCase : List[Any] = torch.tensor(lowercase_ ) else: assert isinstance(lowercase_ ,torch.tensor ), type(lowercase_ ) _UpperCamelCase : Optional[Any] = v return r class __SCREAMING_SNAKE_CASE : '''simple docstring''' SCREAMING_SNAKE_CASE__ :Any = {} def __init__( self : str , __a : dict , __a : str = "root" , __a : Any=0 ) -> Any: _UpperCamelCase : Optional[Any] = name _UpperCamelCase : Optional[Any] = level _UpperCamelCase : Union[str, Any] = {} for k, v in dictionary.items(): if v is None: raise ValueError() _UpperCamelCase : Optional[int] = copy.deepcopy(__a ) _UpperCamelCase : Dict = copy.deepcopy(__a ) if isinstance(__a , __a ): _UpperCamelCase : Union[str, Any] = Config(__a , name=__a , level=level + 1 ) _UpperCamelCase : Optional[Any] = v setattr(self , __a , __a ) _UpperCamelCase : Optional[Any] = d def __repr__( self : List[str] ) -> List[Any]: return str(list((self._pointer.keys()) ) ) def __setattr__( self : Dict , __a : Union[str, Any] , __a : Optional[int] ) -> int: _UpperCamelCase : Any = val _UpperCamelCase : Optional[Any] = val _UpperCamelCase : Dict = key.split("." ) _UpperCamelCase : int = len(__a ) - 1 _UpperCamelCase : List[str] = self._pointer if len(__a ) > 1: for i, l in enumerate(__a ): if hasattr(self , __a ) and isinstance(getattr(self , __a ) , __a ): setattr(getattr(self , __a ) , ".".join(levels[i:] ) , __a ) if l == last_level: _UpperCamelCase : str = val else: _UpperCamelCase : List[str] = pointer[l] def __SCREAMING_SNAKE_CASE ( self : Any ) -> int: return self._pointer def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : Tuple , __a : List[str] ) -> Dict: with open(F'''{file_name}''' , "w" ) as stream: dump(__a , __a ) def __SCREAMING_SNAKE_CASE ( self : int , __a : List[Any] , __a : Dict ) -> List[Any]: with open(F'''{file_name}''' , "w" ) as stream: json.dump(__a , __a ) @staticmethod def __SCREAMING_SNAKE_CASE ( __a : Union[str, Any] ) -> Optional[int]: with open(__a ) as stream: _UpperCamelCase : int = load(__a , Loader=__a ) return data def __str__( self : List[str] ) -> Tuple: _UpperCamelCase : List[str] = " " if self._name != "root": _UpperCamelCase : Dict = F'''{t * (self._level-1)}{self._name}:\n''' else: _UpperCamelCase : Any = "" _UpperCamelCase : Any = self._level for i, (k, v) in enumerate(self._pointer.items() ): if isinstance(__a , __a ): r += F'''{t * (self._level)}{v}\n''' self._level += 1 else: r += F'''{t * (self._level)}{k}: {v} ({type(__a ).__name__})\n''' _UpperCamelCase : Optional[Any] = level return r[:-1] @classmethod def __SCREAMING_SNAKE_CASE ( cls : Dict , __a : str , **__a : str ) -> Union[str, Any]: _UpperCamelCase, _UpperCamelCase : int = cls.get_config_dict(__a , **__a ) return cls(__a ) @classmethod def __SCREAMING_SNAKE_CASE ( cls : Optional[int] , __a : str , **__a : Union[str, Any] ) -> Tuple: _UpperCamelCase : Tuple = kwargs.pop("cache_dir" , __a ) _UpperCamelCase : Optional[int] = kwargs.pop("force_download" , __a ) _UpperCamelCase : str = kwargs.pop("resume_download" , __a ) _UpperCamelCase : Any = kwargs.pop("proxies" , __a ) _UpperCamelCase : List[Any] = kwargs.pop("local_files_only" , __a ) if os.path.isdir(__a ): _UpperCamelCase : Optional[Any] = os.path.join(__a , __a ) elif os.path.isfile(__a ) or is_remote_url(__a ): _UpperCamelCase : Optional[int] = pretrained_model_name_or_path else: _UpperCamelCase : int = hf_bucket_url(__a , filename=__a , use_cdn=__a ) try: # Load from URL or cache if already cached _UpperCamelCase : Optional[int] = cached_path( __a , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , ) # Load config dict if resolved_config_file is None: raise EnvironmentError _UpperCamelCase : List[Any] = Config.load_yaml(__a ) except EnvironmentError: _UpperCamelCase : Union[str, Any] = "Can't load config for" raise EnvironmentError(__a ) if resolved_config_file == config_file: print("loading configuration file from path" ) else: print("loading configuration file cache" ) return Config.load_yaml(__a ), kwargs def lowercase__ ( lowercase_ ) -> int: """simple docstring""" _UpperCamelCase : str = torch.load("dump.pt" ,map_location=in_tensor.device ) _UpperCamelCase : str = in_tensor.numpy() _UpperCamelCase : Union[str, Any] = out_tensor.numpy()[0] print(na.shape ,na[0, 0, :5] ) print(na.shape ,na[0, 0, :5] ) assert np.allclose(lowercase_ ,lowercase_ ,rtol=0.01 ,atol=0.1 ), ( F'''{sum([1 for x in np.isclose(lowercase_ ,lowercase_ ,rtol=0.01 ,atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %''' " element-wise mismatch" ) raise Exception("tensors are all good" ) # Hugging face functions below def lowercase__ ( lowercase_ ) -> List[Any]: """simple docstring""" _UpperCamelCase : Dict = urlparse(lowercase_ ) return parsed.scheme in ("http", "https") def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=True ) -> str: """simple docstring""" _UpperCamelCase : int = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX _UpperCamelCase : List[str] = "/" not in model_id if legacy_format: return F'''{endpoint}/{model_id}-{filename}''' else: return F'''{endpoint}/{model_id}/{filename}''' def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=0 ,lowercase_=None ,) -> List[Any]: """simple docstring""" _UpperCamelCase : Optional[int] = "python/{}".format(sys.version.split()[0] ) if _torch_available: ua += "; torch/{}".format(torch.__version__ ) if isinstance(lowercase_ ,lowercase_ ): ua += "; " + "; ".join("{}/{}".format(lowercase_ ,lowercase_ ) for k, v in user_agent.items() ) elif isinstance(lowercase_ ,lowercase_ ): ua += "; " + user_agent _UpperCamelCase : Any = {"user-agent": ua} if resume_size > 0: _UpperCamelCase : str = "bytes=%d-" % (resume_size,) _UpperCamelCase : str = requests.get(lowercase_ ,stream=lowercase_ ,proxies=lowercase_ ,headers=lowercase_ ) if response.status_code == 416: # Range not satisfiable return _UpperCamelCase : List[str] = response.headers.get("Content-Length" ) _UpperCamelCase : Union[str, Any] = resume_size + int(lowercase_ ) if content_length is not None else None _UpperCamelCase : Optional[int] = tqdm( unit="B" ,unit_scale=lowercase_ ,total=lowercase_ ,initial=lowercase_ ,desc="Downloading" ,) for chunk in response.iter_content(chunk_size=1_024 ): if chunk: # filter out keep-alive new chunks progress.update(len(lowercase_ ) ) temp_file.write(lowercase_ ) progress.close() def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=10 ,lowercase_=False ,lowercase_=None ,lowercase_=False ,) -> Tuple: """simple docstring""" if cache_dir is None: _UpperCamelCase : str = TRANSFORMERS_CACHE if isinstance(lowercase_ ,lowercase_ ): _UpperCamelCase : Dict = str(lowercase_ ) os.makedirs(lowercase_ ,exist_ok=lowercase_ ) _UpperCamelCase : Dict = None if not local_files_only: try: _UpperCamelCase : List[Any] = requests.head(lowercase_ ,allow_redirects=lowercase_ ,proxies=lowercase_ ,timeout=lowercase_ ) if response.status_code == 200: _UpperCamelCase : str = response.headers.get("ETag" ) except (EnvironmentError, requests.exceptions.Timeout): # etag is already None pass _UpperCamelCase : int = url_to_filename(lowercase_ ,lowercase_ ) # get cache path to put the file _UpperCamelCase : Any = os.path.join(lowercase_ ,lowercase_ ) # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible. # try to get the last downloaded one if etag is None: if os.path.exists(lowercase_ ): return cache_path else: _UpperCamelCase : Optional[int] = [ file for file in fnmatch.filter(os.listdir(lowercase_ ) ,filename + ".*" ) if not file.endswith(".json" ) and not file.endswith(".lock" ) ] if len(lowercase_ ) > 0: return os.path.join(lowercase_ ,matching_files[-1] ) else: # If files cannot be found and local_files_only=True, # the models might've been found if local_files_only=False # Notify the user about that if local_files_only: raise ValueError( "Cannot find the requested files in the cached path and outgoing traffic has been" " disabled. To enable model look-ups and downloads online, set 'local_files_only'" " to False." ) return None # From now on, etag is not None. if os.path.exists(lowercase_ ) and not force_download: return cache_path # Prevent parallel downloads of the same file with a lock. _UpperCamelCase : Dict = cache_path + ".lock" with FileLock(lowercase_ ): # If the download just completed while the lock was activated. if os.path.exists(lowercase_ ) and not force_download: # Even if returning early like here, the lock will be released. return cache_path if resume_download: _UpperCamelCase : List[str] = cache_path + ".incomplete" @contextmanager def _resumable_file_manager(): with open(lowercase_ ,"a+b" ) as f: yield f _UpperCamelCase : Union[str, Any] = _resumable_file_manager if os.path.exists(lowercase_ ): _UpperCamelCase : str = os.stat(lowercase_ ).st_size else: _UpperCamelCase : Dict = 0 else: _UpperCamelCase : Tuple = partial(tempfile.NamedTemporaryFile ,dir=lowercase_ ,delete=lowercase_ ) _UpperCamelCase : Optional[Any] = 0 # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. with temp_file_manager() as temp_file: print( "%s not found in cache or force_download set to True, downloading to %s" ,lowercase_ ,temp_file.name ,) http_get( lowercase_ ,lowercase_ ,proxies=lowercase_ ,resume_size=lowercase_ ,user_agent=lowercase_ ,) os.replace(temp_file.name ,lowercase_ ) _UpperCamelCase : Optional[int] = {"url": url, "etag": etag} _UpperCamelCase : List[str] = cache_path + ".json" with open(lowercase_ ,"w" ) as meta_file: json.dump(lowercase_ ,lowercase_ ) return cache_path def lowercase__ ( lowercase_ ,lowercase_=None ) -> int: """simple docstring""" _UpperCamelCase : Optional[int] = url.encode("utf-8" ) _UpperCamelCase : List[str] = shaaaa(lowercase_ ) _UpperCamelCase : List[str] = url_hash.hexdigest() if etag: _UpperCamelCase : Optional[Any] = etag.encode("utf-8" ) _UpperCamelCase : Optional[Any] = shaaaa(lowercase_ ) filename += "." + etag_hash.hexdigest() if url.endswith(".h5" ): filename += ".h5" return filename def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=False ,lowercase_=False ,lowercase_=False ,) -> str: """simple docstring""" if cache_dir is None: _UpperCamelCase : List[Any] = TRANSFORMERS_CACHE if isinstance(lowercase_ ,lowercase_ ): _UpperCamelCase : str = str(lowercase_ ) if isinstance(lowercase_ ,lowercase_ ): _UpperCamelCase : str = str(lowercase_ ) if is_remote_url(lowercase_ ): # URL, so get it from the cache (downloading if necessary) _UpperCamelCase : Union[str, Any] = get_from_cache( lowercase_ ,cache_dir=lowercase_ ,force_download=lowercase_ ,proxies=lowercase_ ,resume_download=lowercase_ ,user_agent=lowercase_ ,local_files_only=lowercase_ ,) elif os.path.exists(lowercase_ ): # File, and it exists. _UpperCamelCase : List[str] = url_or_filename elif urlparse(lowercase_ ).scheme == "": # File, but it doesn't exist. raise EnvironmentError("file {} not found".format(lowercase_ ) ) else: # Something unknown raise ValueError("unable to parse {} as a URL or as a local path".format(lowercase_ ) ) if extract_compressed_file: if not is_zipfile(lowercase_ ) and not tarfile.is_tarfile(lowercase_ ): return output_path # Path where we extract compressed archives # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/" _UpperCamelCase, _UpperCamelCase : Any = os.path.split(lowercase_ ) _UpperCamelCase : Optional[int] = output_file.replace("." ,"-" ) + "-extracted" _UpperCamelCase : Any = os.path.join(lowercase_ ,lowercase_ ) if os.path.isdir(lowercase_ ) and os.listdir(lowercase_ ) and not force_extract: return output_path_extracted # Prevent parallel extractions _UpperCamelCase : Optional[int] = output_path + ".lock" with FileLock(lowercase_ ): shutil.rmtree(lowercase_ ,ignore_errors=lowercase_ ) os.makedirs(lowercase_ ) if is_zipfile(lowercase_ ): with ZipFile(lowercase_ ,"r" ) as zip_file: zip_file.extractall(lowercase_ ) zip_file.close() elif tarfile.is_tarfile(lowercase_ ): _UpperCamelCase : int = tarfile.open(lowercase_ ) tar_file.extractall(lowercase_ ) tar_file.close() else: raise EnvironmentError("Archive format of {} could not be identified".format(lowercase_ ) ) return output_path_extracted return output_path def lowercase__ ( lowercase_ ,lowercase_="," ) -> Optional[int]: """simple docstring""" assert isinstance(lowercase_ ,lowercase_ ) if os.path.isfile(lowercase_ ): with open(lowercase_ ) as f: _UpperCamelCase : Tuple = eval(f.read() ) else: _UpperCamelCase : str = requests.get(lowercase_ ) try: _UpperCamelCase : Optional[int] = requests.json() except Exception: _UpperCamelCase : Union[str, Any] = req.content.decode() assert data is not None, "could not connect" try: _UpperCamelCase : List[Any] = eval(lowercase_ ) except Exception: _UpperCamelCase : int = data.split("\n" ) req.close() return data def lowercase__ ( lowercase_ ) -> Optional[int]: """simple docstring""" _UpperCamelCase : List[Any] = requests.get(lowercase_ ) _UpperCamelCase : Optional[int] = np.array(Image.open(BytesIO(response.content ) ) ) return img def lowercase__ ( lowercase_ ) -> str: """simple docstring""" _UpperCamelCase : List[Any] = url.split("/" )[-1] if fn not in os.listdir(os.getcwd() ): wget.download(lowercase_ ) with open(lowercase_ ,"rb" ) as stream: _UpperCamelCase : Union[str, Any] = pkl.load(lowercase_ ) _UpperCamelCase : Union[str, Any] = weights.pop("model" ) _UpperCamelCase : Optional[int] = {} for k, v in model.items(): _UpperCamelCase : str = torch.from_numpy(lowercase_ ) if "running_var" in k: _UpperCamelCase : List[Any] = torch.tensor([0] ) _UpperCamelCase : str = k.replace("running_var" ,"num_batches_tracked" ) _UpperCamelCase : Any = zero return new def lowercase__ ( ) -> Dict: """simple docstring""" print(F'''{os.path.abspath(os.path.join(lowercase_ ,os.pardir ) )}/demo.ipynb''' ) def lowercase__ ( lowercase_ ,lowercase_="RGB" ) -> int: """simple docstring""" assert isinstance(lowercase_ ,lowercase_ ) if os.path.isfile(lowercase_ ): _UpperCamelCase : Optional[Any] = cva.imread(lowercase_ ) else: _UpperCamelCase : Optional[int] = get_image_from_url(lowercase_ ) assert img is not None, F'''could not connect to: {im}''' _UpperCamelCase : Optional[int] = cva.cvtColor(lowercase_ ,cva.COLOR_BGR2RGB ) if input_format == "RGB": _UpperCamelCase : List[Any] = img[:, :, ::-1] return img def lowercase__ ( lowercase_ ,lowercase_=1 ) -> List[Any]: """simple docstring""" return (images[i : i + batch] for i in range(0 ,len(lowercase_ ) ,lowercase_ ))
310
0
import logging import os import sys import warnings from dataclasses import dataclass, field from random import randint from typing import Optional import datasets import evaluate import numpy as np from datasets import DatasetDict, load_dataset import transformers from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForAudioClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowerCamelCase : Optional[Any] =logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''') def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 1_6000 ) -> int: UpperCamelCase__ : Optional[Any] = int(round(sample_rate * max_length ) ) if len(SCREAMING_SNAKE_CASE__ ) <= sample_length: return wav UpperCamelCase__ : Optional[Any] = randint(0 , len(SCREAMING_SNAKE_CASE__ ) - sample_length - 1 ) return wav[random_offset : random_offset + sample_length] @dataclass class __a : _lowerCAmelCase : Optional[str] = field(default=lowerCamelCase_ , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) _lowerCAmelCase : Optional[str] = field( default=lowerCamelCase_ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) _lowerCAmelCase : Optional[str] = field( default=lowerCamelCase_ , metadata={'''help''': '''A file containing the training audio paths and labels.'''} ) _lowerCAmelCase : Optional[str] = field( default=lowerCamelCase_ , metadata={'''help''': '''A file containing the validation audio paths and labels.'''} ) _lowerCAmelCase : str = field( default='''train''' , metadata={ '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\'''' } , ) _lowerCAmelCase : str = field( default='''validation''' , metadata={ '''help''': ( '''The name of the training data set split to use (via the datasets library). Defaults to \'validation\'''' ) } , ) _lowerCAmelCase : str = field( default='''audio''' , metadata={'''help''': '''The name of the dataset column containing the audio data. Defaults to \'audio\''''} , ) _lowerCAmelCase : str = field( default='''label''' , metadata={'''help''': '''The name of the dataset column containing the labels. Defaults to \'label\''''} ) _lowerCAmelCase : Optional[int] = field( default=lowerCamelCase_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) _lowerCAmelCase : Optional[int] = field( default=lowerCamelCase_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) _lowerCAmelCase : float = field( default=2_0 , metadata={'''help''': '''Audio clips will be randomly cut to this length during training if the value is set.'''} , ) @dataclass class __a : _lowerCAmelCase : str = field( default='''facebook/wav2vec2-base''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , ) _lowerCAmelCase : Optional[str] = field( default=lowerCamelCase_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) _lowerCAmelCase : Optional[str] = field( default=lowerCamelCase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from the Hub'''} ) _lowerCAmelCase : str = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) _lowerCAmelCase : Optional[str] = field( default=lowerCamelCase_ , metadata={'''help''': '''Name or path of preprocessor config.'''} ) _lowerCAmelCase : bool = field( default=lowerCamelCase_ , metadata={'''help''': '''Whether to freeze the feature encoder layers of the model.'''} ) _lowerCAmelCase : bool = field( default=lowerCamelCase_ , metadata={'''help''': '''Whether to generate an attention mask in the feature extractor.'''} ) _lowerCAmelCase : bool = field( default=lowerCamelCase_ , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) _lowerCAmelCase : Optional[bool] = field( default=lowerCamelCase_ , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} ) _lowerCAmelCase : bool = field( default=lowerCamelCase_ , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , ) def __lowercase ( self : str ): '''simple docstring''' if not self.freeze_feature_extractor and self.freeze_feature_encoder: warnings.warn( "The argument `--freeze_feature_extractor` is deprecated and " "will be removed in a future version. Use `--freeze_feature_encoder`" "instead. Setting `freeze_feature_encoder==True`." , _UpperCAmelCase , ) if self.freeze_feature_extractor and not self.freeze_feature_encoder: raise ValueError( "The argument `--freeze_feature_extractor` is deprecated and " "should not be used in combination with `--freeze_feature_encoder`." "Only make use of `--freeze_feature_encoder`." ) def SCREAMING_SNAKE_CASE ( ) -> List[Any]: UpperCamelCase__ : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : List[Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_audio_classification" , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() UpperCamelCase__ : str = training_args.get_process_log_level() logger.setLevel(SCREAMING_SNAKE_CASE__ ) transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} ' + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(f'Training/evaluation parameters {training_args}' ) # Set seed before initializing model. set_seed(training_args.seed ) # Detecting last checkpoint. UpperCamelCase__ : Tuple = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: UpperCamelCase__ : Union[str, Any] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. ' "Use --overwrite_output_dir to train from scratch." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Initialize our dataset and prepare it for the audio classification task. UpperCamelCase__ : Dict = DatasetDict() UpperCamelCase__ : List[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , ) UpperCamelCase__ : int = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , ) if data_args.audio_column_name not in raw_datasets["train"].column_names: raise ValueError( f'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. ' "Make sure to set `--audio_column_name` to the correct audio column - one of " f'{", ".join(raw_datasets["train"].column_names )}.' ) if data_args.label_column_name not in raw_datasets["train"].column_names: raise ValueError( f'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. ' "Make sure to set `--label_column_name` to the correct text column - one of " f'{", ".join(raw_datasets["train"].column_names )}.' ) # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over # transformer outputs in the classifier, but it doesn't always lead to better accuracy UpperCamelCase__ : List[str] = AutoFeatureExtractor.from_pretrained( model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # `datasets` takes care of automatically loading and resampling the audio, # so we just need to set the correct target sampling rate. UpperCamelCase__ : Any = raw_datasets.cast_column( data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) ) UpperCamelCase__ : Optional[int] = feature_extractor.model_input_names[0] def train_transforms(__lowerCAmelCase ): UpperCamelCase__ : Optional[int] = [] for audio in batch[data_args.audio_column_name]: UpperCamelCase__ : Union[str, Any] = random_subsample( audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate ) subsampled_wavs.append(SCREAMING_SNAKE_CASE__ ) UpperCamelCase__ : Dict = feature_extractor(SCREAMING_SNAKE_CASE__ , sampling_rate=feature_extractor.sampling_rate ) UpperCamelCase__ : Tuple = {model_input_name: inputs.get(SCREAMING_SNAKE_CASE__ )} UpperCamelCase__ : Union[str, Any] = list(batch[data_args.label_column_name] ) return output_batch def val_transforms(__lowerCAmelCase ): UpperCamelCase__ : Any = [audio["array"] for audio in batch[data_args.audio_column_name]] UpperCamelCase__ : List[str] = feature_extractor(SCREAMING_SNAKE_CASE__ , sampling_rate=feature_extractor.sampling_rate ) UpperCamelCase__ : Dict = {model_input_name: inputs.get(SCREAMING_SNAKE_CASE__ )} UpperCamelCase__ : List[Any] = list(batch[data_args.label_column_name] ) return output_batch # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. UpperCamelCase__ : str = raw_datasets["train"].features[data_args.label_column_name].names UpperCamelCase__ , UpperCamelCase__ : Dict = {}, {} for i, label in enumerate(SCREAMING_SNAKE_CASE__ ): UpperCamelCase__ : int = str(SCREAMING_SNAKE_CASE__ ) UpperCamelCase__ : Tuple = label # Load the accuracy metric from the datasets package UpperCamelCase__ : List[str] = evaluate.load("accuracy" ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with # `predictions` and `label_ids` fields) and has to return a dictionary string to float. def compute_metrics(__lowerCAmelCase ): UpperCamelCase__ : Optional[int] = np.argmax(eval_pred.predictions , axis=1 ) return metric.compute(predictions=SCREAMING_SNAKE_CASE__ , references=eval_pred.label_ids ) UpperCamelCase__ : Optional[Any] = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(SCREAMING_SNAKE_CASE__ ) , labelaid=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) UpperCamelCase__ : Optional[Any] = AutoModelForAudioClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # freeze the convolutional waveform encoder if model_args.freeze_feature_encoder: model.freeze_feature_encoder() if training_args.do_train: if data_args.max_train_samples is not None: UpperCamelCase__ : Optional[Any] = ( raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms raw_datasets["train"].set_transform(SCREAMING_SNAKE_CASE__ , output_all_columns=SCREAMING_SNAKE_CASE__ ) if training_args.do_eval: if data_args.max_eval_samples is not None: UpperCamelCase__ : Dict = ( raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms raw_datasets["eval"].set_transform(SCREAMING_SNAKE_CASE__ , output_all_columns=SCREAMING_SNAKE_CASE__ ) # Initialize our trainer UpperCamelCase__ : Tuple = Trainer( model=SCREAMING_SNAKE_CASE__ , args=SCREAMING_SNAKE_CASE__ , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ , ) # Training if training_args.do_train: UpperCamelCase__ : List[str] = None if training_args.resume_from_checkpoint is not None: UpperCamelCase__ : Union[str, Any] = training_args.resume_from_checkpoint elif last_checkpoint is not None: UpperCamelCase__ : Optional[int] = last_checkpoint UpperCamelCase__ : Tuple = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE__ ) trainer.save_model() trainer.log_metrics("train" , train_result.metrics ) trainer.save_metrics("train" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: UpperCamelCase__ : str = trainer.evaluate() trainer.log_metrics("eval" , SCREAMING_SNAKE_CASE__ ) trainer.save_metrics("eval" , SCREAMING_SNAKE_CASE__ ) # Write model card and (optionally) push to hub UpperCamelCase__ : int = { "finetuned_from": model_args.model_name_or_path, "tasks": "audio-classification", "dataset": data_args.dataset_name, "tags": ["audio-classification"], } if training_args.push_to_hub: trainer.push_to_hub(**SCREAMING_SNAKE_CASE__ ) else: trainer.create_model_card(**SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": main()
189
'''simple docstring''' import argparse import torch from transformers import BertForMaskedLM if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser( description=( 'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned' ' Distillation' ) ) parser.add_argument('--model_type', default='bert', choices=['bert']) parser.add_argument('--model_name', default='bert-base-uncased', type=str) parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str) parser.add_argument('--vocab_transform', action='store_true') UpperCAmelCase_ = parser.parse_args() if args.model_type == "bert": UpperCAmelCase_ = BertForMaskedLM.from_pretrained(args.model_name) UpperCAmelCase_ = 'bert' else: raise ValueError('args.model_type should be "bert".') UpperCAmelCase_ = model.state_dict() UpperCAmelCase_ = {} for w in ["word_embeddings", "position_embeddings"]: UpperCAmelCase_ = state_dict[f"{prefix}.embeddings.{w}.weight"] for w in ["weight", "bias"]: UpperCAmelCase_ = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"] UpperCAmelCase_ = 0 for teacher_idx in [0, 2, 4, 7, 9, 1_1]: for w in ["weight", "bias"]: UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}" ] std_idx += 1 UpperCAmelCase_ = state_dict['cls.predictions.decoder.weight'] UpperCAmelCase_ = state_dict['cls.predictions.bias'] if args.vocab_transform: for w in ["weight", "bias"]: UpperCAmelCase_ = state_dict[f"cls.predictions.transform.dense.{w}"] UpperCAmelCase_ = state_dict[f"cls.predictions.transform.LayerNorm.{w}"] print(f"N layers selected for distillation: {std_idx}") print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}") print(f"Save transferred checkpoint to {args.dump_checkpoint}.") torch.save(compressed_sd, args.dump_checkpoint)
346
0
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> int: def update_area_of_max_square(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> int: # BASE CASE if row >= rows or col >= cols: return 0 __snake_case: Union[str, Any] = update_area_of_max_square(SCREAMING_SNAKE_CASE__ , col + 1) __snake_case: Optional[Any] = update_area_of_max_square(row + 1 , col + 1) __snake_case: Union[str, Any] = update_area_of_max_square(row + 1 , SCREAMING_SNAKE_CASE__) if mat[row][col]: __snake_case: Dict = 1 + min([right, diagonal, down]) __snake_case: Tuple = max(largest_square_area[0] , SCREAMING_SNAKE_CASE__) return sub_problem_sol else: return 0 __snake_case: Union[str, Any] = [0] update_area_of_max_square(0 , 0) return largest_square_area[0] def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> int: def update_area_of_max_square_using_dp_array( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> int: if row >= rows or col >= cols: return 0 if dp_array[row][col] != -1: return dp_array[row][col] __snake_case: Optional[int] = update_area_of_max_square_using_dp_array(SCREAMING_SNAKE_CASE__ , col + 1 , SCREAMING_SNAKE_CASE__) __snake_case: Optional[Any] = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , SCREAMING_SNAKE_CASE__) __snake_case: Optional[Any] = update_area_of_max_square_using_dp_array(row + 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) if mat[row][col]: __snake_case: Any = 1 + min([right, diagonal, down]) __snake_case: Optional[Any] = max(largest_square_area[0] , SCREAMING_SNAKE_CASE__) __snake_case: int = sub_problem_sol return sub_problem_sol else: return 0 __snake_case: Any = [0] __snake_case: int = [[-1] * cols for _ in range(SCREAMING_SNAKE_CASE__)] update_area_of_max_square_using_dp_array(0 , 0 , SCREAMING_SNAKE_CASE__) return largest_square_area[0] def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> int: __snake_case: Tuple = [[0] * (cols + 1) for _ in range(rows + 1)] __snake_case: str = 0 for row in range(rows - 1 , -1 , -1): for col in range(cols - 1 , -1 , -1): __snake_case: Tuple = dp_array[row][col + 1] __snake_case: List[str] = dp_array[row + 1][col + 1] __snake_case: Tuple = dp_array[row + 1][col] if mat[row][col] == 1: __snake_case: List[str] = 1 + min(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) __snake_case: Optional[Any] = max(dp_array[row][col] , SCREAMING_SNAKE_CASE__) else: __snake_case: List[Any] = 0 return largest_square_area def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> int: __snake_case: Union[str, Any] = [0] * (cols + 1) __snake_case: Union[str, Any] = [0] * (cols + 1) __snake_case: Any = 0 for row in range(rows - 1 , -1 , -1): for col in range(cols - 1 , -1 , -1): __snake_case: int = current_row[col + 1] __snake_case: Union[str, Any] = next_row[col + 1] __snake_case: Optional[Any] = next_row[col] if mat[row][col] == 1: __snake_case: Optional[int] = 1 + min(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) __snake_case: int = max(current_row[col] , SCREAMING_SNAKE_CASE__) else: __snake_case: Tuple = 0 __snake_case: Tuple = current_row return largest_square_area if __name__ == "__main__": import doctest doctest.testmod() print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
350
import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class __snake_case ( __lowerCamelCase ): '''simple docstring''' def __init__( self : str , *A : Dict , A : Optional[int]=None , A : Tuple=None , **A : Optional[int] ): super().__init__(*A , **A ) __snake_case: List[Any] = eval_examples __snake_case: str = post_process_function def UpperCAmelCase__ ( self : List[Any] , A : Dict=None , A : int=None , A : List[Any]=None , A : str = "eval" ): __snake_case: int = self.eval_dataset if eval_dataset is None else eval_dataset __snake_case: Any = self.get_eval_dataloader(A ) __snake_case: Optional[Any] = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. __snake_case: Union[str, Any] = self.compute_metrics __snake_case: List[str] = None __snake_case: Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop __snake_case: Tuple = time.time() try: __snake_case: Any = eval_loop( A , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A , metric_key_prefix=A , ) finally: __snake_case: Optional[int] = compute_metrics __snake_case: Union[str, Any] = self.args.eval_batch_size * self.args.world_size if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( A , A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default __snake_case: List[str] = self.post_process_function(A , A , output.predictions ) __snake_case: List[Any] = self.compute_metrics(A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f'''{metric_key_prefix}_''' ): __snake_case: str = metrics.pop(A ) metrics.update(output.metrics ) else: __snake_case: List[Any] = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(A ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) __snake_case: str = self.callback_handler.on_evaluate(self.args , self.state , self.control , A ) return metrics def UpperCAmelCase__ ( self : Optional[Any] , A : List[Any] , A : List[str] , A : str=None , A : str = "test" ): __snake_case: Optional[Any] = self.get_test_dataloader(A ) # Temporarily disable metric computation, we will do it in the loop here. __snake_case: Optional[int] = self.compute_metrics __snake_case: List[Any] = None __snake_case: str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop __snake_case: Dict = time.time() try: __snake_case: str = eval_loop( A , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A , metric_key_prefix=A , ) finally: __snake_case: List[Any] = compute_metrics __snake_case: Dict = self.args.eval_batch_size * self.args.world_size if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( A , A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output __snake_case: Union[str, Any] = self.post_process_function(A , A , output.predictions , """predict""" ) __snake_case: str = self.compute_metrics(A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f'''{metric_key_prefix}_''' ): __snake_case: List[str] = metrics.pop(A ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=A )
293
0
"""simple docstring""" import os import sys from contextlib import contextmanager # Windows only if os.name == "nt": import ctypes import msvcrt # noqa class lowerCamelCase__ ( ctypes.Structure ): """simple docstring""" __a = [("""size""", ctypes.c_int), ("""visible""", ctypes.c_byte)] def lowerCamelCase ( ) -> Optional[int]: '''simple docstring''' if os.name == "nt": __UpperCAmelCase : Dict = CursorInfo() __UpperCAmelCase : Any = ctypes.windll.kernelaa.GetStdHandle(-1_1 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(_UpperCamelCase , ctypes.byref(_UpperCamelCase ) ) __UpperCAmelCase : Tuple = False ctypes.windll.kernelaa.SetConsoleCursorInfo(_UpperCamelCase , ctypes.byref(_UpperCamelCase ) ) elif os.name == "posix": sys.stdout.write("""\033[?25l""" ) sys.stdout.flush() def lowerCamelCase ( ) -> Optional[int]: '''simple docstring''' if os.name == "nt": __UpperCAmelCase : str = CursorInfo() __UpperCAmelCase : int = ctypes.windll.kernelaa.GetStdHandle(-1_1 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(_UpperCamelCase , ctypes.byref(_UpperCamelCase ) ) __UpperCAmelCase : Union[str, Any] = True ctypes.windll.kernelaa.SetConsoleCursorInfo(_UpperCamelCase , ctypes.byref(_UpperCamelCase ) ) elif os.name == "posix": sys.stdout.write("""\033[?25h""" ) sys.stdout.flush() @contextmanager def lowerCamelCase ( ) -> str: '''simple docstring''' try: hide_cursor() yield finally: show_cursor()
115
"""simple docstring""" import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : str = """ylacombe/bark-small""" __UpperCAmelCase : List[Any] = tempfile.mkdtemp() __UpperCAmelCase : Optional[Any] = """en_speaker_1""" __UpperCAmelCase : Union[str, Any] = """This is a test string""" __UpperCAmelCase : Dict = """speaker_embeddings_path.json""" __UpperCAmelCase : Any = """speaker_embeddings""" def lowerCamelCase__ ( self : Dict , **UpperCamelCase : Union[str, Any] ): '''simple docstring''' return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Tuple = self.get_tokenizer() __UpperCAmelCase : Any = BarkProcessor(tokenizer=UpperCamelCase ) processor.save_pretrained(self.tmpdirname ) __UpperCAmelCase : Union[str, Any] = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) __UpperCAmelCase : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) __UpperCAmelCase : Any = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : List[str] = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) __UpperCAmelCase : List[str] = 35 __UpperCAmelCase : Tuple = 2 __UpperCAmelCase : Union[str, Any] = 8 __UpperCAmelCase : Optional[Any] = { """semantic_prompt""": np.ones(UpperCamelCase ), """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ), """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset __UpperCAmelCase : Dict = processor(text=self.input_string , voice_preset=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCamelCase , np.array([] ) ).tolist() ) # test loading voice preset from npz file __UpperCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , """file.npz""" ) np.savez(UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : Optional[int] = processor(text=self.input_string , voice_preset=UpperCamelCase ) __UpperCAmelCase : List[Any] = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCamelCase , np.array([] ) ).tolist() ) # test loading voice preset from the hub __UpperCAmelCase : Dict = processor(text=self.input_string , voice_preset=self.voice_preset ) def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : str = self.get_tokenizer() __UpperCAmelCase : Union[str, Any] = BarkProcessor(tokenizer=UpperCamelCase ) __UpperCAmelCase : List[str] = processor(text=self.input_string ) __UpperCAmelCase : Tuple = tokenizer( self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=UpperCamelCase , return_attention_mask=UpperCamelCase , return_token_type_ids=UpperCamelCase , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
115
1
'''simple docstring''' from sympy import diff, lambdify, symbols from sympy.functions import * # noqa: F403 def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase = "x", lowerCamelCase = 1_0**-1_0, lowerCamelCase = 1, ): __lowerCAmelCase = symbols(lowerCamelCase) __lowerCAmelCase = lambdify(lowerCamelCase, lowerCamelCase) __lowerCAmelCase = lambdify(lowerCamelCase, diff(lowerCamelCase, lowerCamelCase)) __lowerCAmelCase = starting_point while True: if diff_function(lowerCamelCase) != 0: __lowerCAmelCase = prev_guess - multiplicity * func(lowerCamelCase) / diff_function( lowerCamelCase) else: raise ZeroDivisionError('''Could not find root''') from None # Precision is checked by comparing the difference of consecutive guesses if abs(next_guess - prev_guess) < precision: return next_guess __lowerCAmelCase = next_guess # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""") # Find root of polynomial # Find fourth Root of 5 print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}""") # Find value of e print( """The root of log(y) - 1 = 0 is """, f"""{newton_raphson('log(y) - 1', 2, variable='y')}""", ) # Exponential Roots print( """The root of exp(x) - 1 = 0 is""", f"""{newton_raphson('exp(x) - 1', 1_0, precision=0.0_05)}""", ) # Find root of cos(x) print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
9
'''simple docstring''' # Imports import numpy as np class a__ : """simple docstring""" def __init__(self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None ): self.set_matricies(red=__lowercase , green=__lowercase , blue=__lowercase , red_edge=__lowercase , nir=__lowercase ) def _snake_case (self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None ): if red is not None: __lowerCAmelCase = red if green is not None: __lowerCAmelCase = green if blue is not None: __lowerCAmelCase = blue if red_edge is not None: __lowerCAmelCase = red_edge if nir is not None: __lowerCAmelCase = nir return True def _snake_case (self , __lowercase="" , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None ): self.set_matricies(red=__lowercase , green=__lowercase , blue=__lowercase , red_edge=__lowercase , nir=__lowercase ) __lowerCAmelCase = { '''ARVI2''': self.arvaa, '''CCCI''': self.ccci, '''CVI''': self.cvi, '''GLI''': self.gli, '''NDVI''': self.ndvi, '''BNDVI''': self.bndvi, '''redEdgeNDVI''': self.red_edge_ndvi, '''GNDVI''': self.gndvi, '''GBNDVI''': self.gbndvi, '''GRNDVI''': self.grndvi, '''RBNDVI''': self.rbndvi, '''PNDVI''': self.pndvi, '''ATSAVI''': self.atsavi, '''BWDRVI''': self.bwdrvi, '''CIgreen''': self.ci_green, '''CIrededge''': self.ci_rededge, '''CI''': self.ci, '''CTVI''': self.ctvi, '''GDVI''': self.gdvi, '''EVI''': self.evi, '''GEMI''': self.gemi, '''GOSAVI''': self.gosavi, '''GSAVI''': self.gsavi, '''Hue''': self.hue, '''IVI''': self.ivi, '''IPVI''': self.ipvi, '''I''': self.i, '''RVI''': self.rvi, '''MRVI''': self.mrvi, '''MSAVI''': self.m_savi, '''NormG''': self.norm_g, '''NormNIR''': self.norm_nir, '''NormR''': self.norm_r, '''NGRDI''': self.ngrdi, '''RI''': self.ri, '''S''': self.s, '''IF''': self._if, '''DVI''': self.dvi, '''TVI''': self.tvi, '''NDRE''': self.ndre, } try: return funcs[index]() except KeyError: print('''Index not in the list!''' ) return False def _snake_case (self ): return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red))) def _snake_case (self ): return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / ( (self.nir - self.red) / (self.nir + self.red) ) def _snake_case (self ): return self.nir * (self.red / (self.green**2)) def _snake_case (self ): return (2 * self.green - self.red - self.blue) / ( 2 * self.green + self.red + self.blue ) def _snake_case (self ): return (self.nir - self.red) / (self.nir + self.red) def _snake_case (self ): return (self.nir - self.blue) / (self.nir + self.blue) def _snake_case (self ): return (self.redEdge - self.red) / (self.redEdge + self.red) def _snake_case (self ): return (self.nir - self.green) / (self.nir + self.green) def _snake_case (self ): return (self.nir - (self.green + self.blue)) / ( self.nir + (self.green + self.blue) ) def _snake_case (self ): return (self.nir - (self.green + self.red)) / ( self.nir + (self.green + self.red) ) def _snake_case (self ): return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red)) def _snake_case (self ): return (self.nir - (self.green + self.red + self.blue)) / ( self.nir + (self.green + self.red + self.blue) ) def _snake_case (self , __lowercase=0.0_8 , __lowercase=1.2_2 , __lowercase=0.0_3 ): return a * ( (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2)) ) def _snake_case (self ): return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue) def _snake_case (self ): return (self.nir / self.green) - 1 def _snake_case (self ): return (self.nir / self.redEdge) - 1 def _snake_case (self ): return (self.red - self.blue) / self.red def _snake_case (self ): __lowerCAmelCase = self.ndvi() return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2)) def _snake_case (self ): return self.nir - self.green def _snake_case (self ): return 2.5 * ( (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1) ) def _snake_case (self ): __lowerCAmelCase = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / ( self.nir + self.red + 0.5 ) return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red) def _snake_case (self , __lowercase=0.1_6 ): return (self.nir - self.green) / (self.nir + self.green + y) def _snake_case (self , __lowercase=0.5 ): return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n) def _snake_case (self ): return np.arctan( ((2 * self.red - self.green - self.blue) / 3_0.5) * (self.green - self.blue) ) def _snake_case (self , __lowercase=None , __lowercase=None ): return (self.nir - b) / (a * self.red) def _snake_case (self ): return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1) def _snake_case (self ): return (self.red + self.green + self.blue) / 3_0.5 def _snake_case (self ): return self.nir / self.red def _snake_case (self ): return (self.rvi() - 1) / (self.rvi() + 1) def _snake_case (self ): return ( (2 * self.nir + 1) - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2) ) / 2 def _snake_case (self ): return self.green / (self.nir + self.red + self.green) def _snake_case (self ): return self.nir / (self.nir + self.red + self.green) def _snake_case (self ): return self.red / (self.nir + self.red + self.green) def _snake_case (self ): return (self.green - self.red) / (self.green + self.red) def _snake_case (self ): return (self.red - self.green) / (self.red + self.green) def _snake_case (self ): __lowerCAmelCase = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] ) __lowerCAmelCase = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] ) return (max_value - min_value) / max_value def _snake_case (self ): return (2 * self.red - self.green - self.blue) / (self.green - self.blue) def _snake_case (self ): return self.nir / self.red def _snake_case (self ): return (self.ndvi() + 0.5) ** (1 / 2) def _snake_case (self ): return (self.nir - self.redEdge) / (self.nir + self.redEdge)
9
1
import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__=13 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=99 , lowerCamelCase__=32 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=512 , lowerCamelCase__=16 , lowerCamelCase__=2 , lowerCamelCase__=0.02 , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__="None" , lowerCamelCase__=3 , lowerCamelCase__=4 , lowerCamelCase__=None , ) -> int: '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_input_mask __lowerCamelCase = use_token_type_ids __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range __lowerCamelCase = num_labels __lowerCamelCase = num_choices __lowerCamelCase = relative_attention __lowerCamelCase = position_biased_input __lowerCamelCase = pos_att_type __lowerCamelCase = scope def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = None if self.use_input_mask: __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __lowerCamelCase = None if self.use_token_type_ids: __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __lowerCamelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def lowercase_ ( self ) -> List[str]: '''simple docstring''' __lowerCamelCase = self.get_config() __lowerCamelCase = 300 return config def lowercase_ ( self , lowerCamelCase__ ) -> Optional[int]: '''simple docstring''' self.parent.assertListEqual(list(result.loss.size() ) , [] ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any: '''simple docstring''' __lowerCamelCase = DebertaModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() __lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ )[0] __lowerCamelCase = model(lowerCamelCase__ , token_type_ids=lowerCamelCase__ )[0] __lowerCamelCase = model(lowerCamelCase__ )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Tuple: '''simple docstring''' __lowerCamelCase = DebertaForMaskedLM(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() __lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = self.num_labels __lowerCamelCase = DebertaForSequenceClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() __lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(lowerCamelCase__ ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[int]: '''simple docstring''' __lowerCamelCase = self.num_labels __lowerCamelCase = DebertaForTokenClassification(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() __lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Dict: '''simple docstring''' __lowerCamelCase = DebertaForQuestionAnswering(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() __lowerCamelCase = model( lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() ( ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ) = config_and_inputs __lowerCamelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ): """simple docstring""" snake_case_ = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) snake_case_ = ( { '''feature-extraction''': DebertaModel, '''fill-mask''': DebertaForMaskedLM, '''question-answering''': DebertaForQuestionAnswering, '''text-classification''': DebertaForSequenceClassification, '''token-classification''': DebertaForTokenClassification, '''zero-shot''': DebertaForSequenceClassification, } if is_torch_available() else {} ) snake_case_ = True snake_case_ = False snake_case_ = False snake_case_ = False snake_case_ = False def lowercase_ ( self ) -> List[Any]: '''simple docstring''' __lowerCamelCase = DebertaModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 ) def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() def lowercase_ ( self ) -> List[str]: '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*lowerCamelCase__ ) def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCamelCase__ ) def lowercase_ ( self ) -> Dict: '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCamelCase__ ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*lowerCamelCase__ ) def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*lowerCamelCase__ ) @slow def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = DebertaModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) @require_torch @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @unittest.skip(reason='Model not available yet' ) def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' pass @slow def lowercase_ ( self ) -> Tuple: '''simple docstring''' __lowerCamelCase = DebertaModel.from_pretrained('microsoft/deberta-base' ) __lowerCamelCase = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] ) __lowerCamelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )[0] # compare the actual values for a slice. __lowerCamelCase = torch.tensor( [[[-0.59_86, -0.80_55, -0.84_62], [1.44_84, -0.93_48, -0.80_59], [0.31_23, 0.00_32, -1.41_31]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCamelCase__ , atol=1e-4 ) , f"""{output[:, 1:4, 1:4]}""" )
90
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' global f # a global dp table for knapsack if f[i][j] < 0: if j < wt[i - 1]: snake_case_ = mf_knapsack(i - 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: snake_case_ = max( mf_knapsack(i - 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , mf_knapsack(i - 1 , UpperCamelCase__ , UpperCamelCase__ , j - wt[i - 1] ) + val[i - 1] , ) snake_case_ = val return f[i][j] def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = [[0] * (w + 1) for _ in range(n + 1 )] for i in range(1 , n + 1 ): for w_ in range(1 , w + 1 ): if wt[i - 1] <= w_: snake_case_ = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] ) else: snake_case_ = dp[i - 1][w_] return dp[n][w_], dp def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' if not (isinstance(UpperCamelCase__ , (list, tuple) ) and isinstance(UpperCamelCase__ , (list, tuple) )): raise ValueError( 'Both the weights and values vectors must be either lists or tuples' ) snake_case_ = len(UpperCamelCase__ ) if num_items != len(UpperCamelCase__ ): snake_case_ = ( 'The number of weights must be the same as the number of values.\n' F'''But got {num_items} weights and {len(UpperCamelCase__ )} values''' ) raise ValueError(UpperCamelCase__ ) for i in range(UpperCamelCase__ ): if not isinstance(wt[i] , UpperCamelCase__ ): snake_case_ = ( 'All weights must be integers but got weight of ' F'''type {type(wt[i] )} at index {i}''' ) raise TypeError(UpperCamelCase__ ) snake_case_ , snake_case_ = knapsack(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) snake_case_ = set() _construct_solution(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return optimal_val, example_optional_set def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' if i > 0 and j > 0: if dp[i - 1][j] == dp[i][j]: _construct_solution(UpperCamelCase__ , UpperCamelCase__ , i - 1 , UpperCamelCase__ , UpperCamelCase__ ) else: optimal_set.add(UpperCamelCase__ ) _construct_solution(UpperCamelCase__ , UpperCamelCase__ , i - 1 , j - wt[i - 1] , UpperCamelCase__ ) if __name__ == "__main__": _UpperCAmelCase : Tuple = [3, 2, 4, 4] _UpperCAmelCase : Optional[Any] = [4, 3, 2, 3] _UpperCAmelCase : List[str] = 4 _UpperCAmelCase : str = 6 _UpperCAmelCase : Tuple = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)] _UpperCAmelCase , _UpperCAmelCase : List[Any] = knapsack(w, wt, val, n) print(optimal_solution) print(mf_knapsack(n, wt, val, w)) # switched the n and w # testing the dynamic programming problem with example # the optimal subset for the above example are items 3 and 4 _UpperCAmelCase , _UpperCAmelCase : Any = knapsack_with_example_solution(w, wt, val) assert optimal_solution == 8 assert optimal_subset == {3, 4} print("""optimal_value = """, optimal_solution) print("""An optimal subset corresponding to the optimal value""", optimal_subset)
285
0
import json import logging import os import sys from time import time from unittest.mock import patch from transformers.testing_utils import TestCasePlus, require_torch_tpu logging.basicConfig(level=logging.DEBUG) a__: List[Any] = logging.getLogger() def UpperCamelCase__( UpperCamelCase__ : Tuple )->Optional[int]: A__ = {} A__ = os.path.join(UpperCamelCase__ , '''all_results.json''' ) if os.path.exists(UpperCamelCase__ ): with open(UpperCamelCase__ , '''r''' ) as f: A__ = json.load(UpperCamelCase__ ) else: raise ValueError(f"can't find {path}" ) return results a__: Any = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) @require_torch_tpu class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ): def UpperCamelCase ( self ): import xla_spawn A__ = self.get_auto_remove_tmp_dir() A__ = f"\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split() with patch.object(__lowerCamelCase,'''argv''',__lowerCamelCase ): A__ = time() xla_spawn.main() A__ = time() A__ = get_results(__lowerCamelCase ) self.assertGreaterEqual(result['''eval_accuracy'''],0.75 ) # Assert that the script takes less than 500 seconds to make sure it doesn't hang. self.assertLess(end - start,500 ) def UpperCamelCase ( self ): import xla_spawn A__ = ''' ./tests/test_trainer_tpu.py --num_cores=8 ./tests/test_trainer_tpu.py '''.split() with patch.object(__lowerCamelCase,'''argv''',__lowerCamelCase ): xla_spawn.main()
39
import re from typing import Callable, List, Optional, Union import tensorflow as tf try: from tensorflow.keras.optimizers.legacy import Adam except ImportError: from tensorflow.keras.optimizers import Adam class SCREAMING_SNAKE_CASE__ ( tf.keras.optimizers.schedules.LearningRateSchedule ): def __init__( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase = 1.0,__lowerCamelCase = None,): super().__init__() A__ = initial_learning_rate A__ = warmup_steps A__ = power A__ = decay_schedule_fn A__ = name def __call__( self,__lowerCamelCase ): with tf.name_scope(self.name or '''WarmUp''' ) as name: # Implements polynomial warmup. i.e., if global_step < warmup_steps, the # learning rate will be `global_step/num_warmup_steps * init_lr`. A__ = tf.cast(__lowerCamelCase,tf.floataa ) A__ = tf.cast(self.warmup_steps,tf.floataa ) A__ = global_step_float / warmup_steps_float A__ = self.initial_learning_rate * tf.math.pow(__lowerCamelCase,self.power ) return tf.cond( global_step_float < warmup_steps_float,lambda: warmup_learning_rate,lambda: self.decay_schedule_fn(step - self.warmup_steps ),name=__lowerCamelCase,) def UpperCamelCase ( self ): return { "initial_learning_rate": self.initial_learning_rate, "decay_schedule_fn": self.decay_schedule_fn, "warmup_steps": self.warmup_steps, "power": self.power, "name": self.name, } def UpperCamelCase__( UpperCamelCase__ : float , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float = 0.0 , UpperCamelCase__ : float = 0.9 , UpperCamelCase__ : float = 0.999 , UpperCamelCase__ : float = 1e-8 , UpperCamelCase__ : Optional[float] = None , UpperCamelCase__ : Optional[float] = None , UpperCamelCase__ : float = 0.0 , UpperCamelCase__ : float = 1.0 , UpperCamelCase__ : Optional[List[str]] = None , )->int: A__ = tf.keras.optimizers.schedules.PolynomialDecay( initial_learning_rate=UpperCamelCase__ , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=UpperCamelCase__ , ) if num_warmup_steps: A__ = WarmUp( initial_learning_rate=UpperCamelCase__ , decay_schedule_fn=UpperCamelCase__ , warmup_steps=UpperCamelCase__ , ) if weight_decay_rate > 0.0: A__ = AdamWeightDecay( learning_rate=UpperCamelCase__ , weight_decay_rate=UpperCamelCase__ , beta_a=UpperCamelCase__ , beta_a=UpperCamelCase__ , epsilon=UpperCamelCase__ , clipnorm=UpperCamelCase__ , global_clipnorm=UpperCamelCase__ , exclude_from_weight_decay=['''LayerNorm''', '''layer_norm''', '''bias'''] , include_in_weight_decay=UpperCamelCase__ , ) else: A__ = tf.keras.optimizers.Adam( learning_rate=UpperCamelCase__ , beta_a=UpperCamelCase__ , beta_a=UpperCamelCase__ , epsilon=UpperCamelCase__ , clipnorm=UpperCamelCase__ , global_clipnorm=UpperCamelCase__ , ) # We return the optimizer and the LR scheduler in order to better track the # evolution of the LR independently of the optimizer. return optimizer, lr_schedule class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ): def __init__( self,__lowerCamelCase = 0.001,__lowerCamelCase = 0.9,__lowerCamelCase = 0.999,__lowerCamelCase = 1E-7,__lowerCamelCase = False,__lowerCamelCase = 0.0,__lowerCamelCase = None,__lowerCamelCase = None,__lowerCamelCase = "AdamWeightDecay",**__lowerCamelCase,): super().__init__(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,**__lowerCamelCase ) A__ = weight_decay_rate A__ = include_in_weight_decay A__ = exclude_from_weight_decay @classmethod def UpperCamelCase ( cls,__lowerCamelCase ): A__ = {'''WarmUp''': WarmUp} return super(__lowerCamelCase,cls ).from_config(__lowerCamelCase,custom_objects=__lowerCamelCase ) def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ): super(__lowerCamelCase,self )._prepare_local(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ) A__ = tf.constant( self.weight_decay_rate,name='''adam_weight_decay_rate''' ) def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ): A__ = self._do_use_weight_decay(var.name ) if do_decay: return var.assign_sub( learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['''weight_decay_rate'''],use_locking=self._use_locking,) return tf.no_op() def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase=None,**__lowerCamelCase ): A__ , A__ = list(zip(*__lowerCamelCase ) ) return super(__lowerCamelCase,self ).apply_gradients(zip(__lowerCamelCase,__lowerCamelCase ),name=__lowerCamelCase,**__lowerCamelCase ) def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ): if apply_state is None: return self._decayed_lr_t[var_dtype], {} A__ = apply_state or {} A__ = apply_state.get((var_device, var_dtype) ) if coefficients is None: A__ = self._fallback_apply_state(__lowerCamelCase,__lowerCamelCase ) A__ = coefficients return coefficients["lr_t"], {"apply_state": apply_state} def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase=None ): A__ , A__ = self._get_lr(var.device,var.dtype.base_dtype,__lowerCamelCase ) A__ = self._decay_weights_op(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ) with tf.control_dependencies([decay] ): return super(__lowerCamelCase,self )._resource_apply_dense(__lowerCamelCase,__lowerCamelCase,**__lowerCamelCase ) def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase=None ): A__ , A__ = self._get_lr(var.device,var.dtype.base_dtype,__lowerCamelCase ) A__ = self._decay_weights_op(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ) with tf.control_dependencies([decay] ): return super(__lowerCamelCase,self )._resource_apply_sparse(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,**__lowerCamelCase ) def UpperCamelCase ( self ): A__ = super().get_config() config.update({'''weight_decay_rate''': self.weight_decay_rate} ) return config def UpperCamelCase ( self,__lowerCamelCase ): if self.weight_decay_rate == 0: return False if self._include_in_weight_decay: for r in self._include_in_weight_decay: if re.search(__lowerCamelCase,__lowerCamelCase ) is not None: return True if self._exclude_from_weight_decay: for r in self._exclude_from_weight_decay: if re.search(__lowerCamelCase,__lowerCamelCase ) is not None: return False return True class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ): def __init__( self ): A__ = [] A__ = None @property def UpperCamelCase ( self ): if self._accum_steps is None: A__ = tf.Variable( tf.constant(0,dtype=tf.intaa ),trainable=__lowerCamelCase,synchronization=tf.VariableSynchronization.ON_READ,aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,) return self._accum_steps.value() @property def UpperCamelCase ( self ): if not self._gradients: raise ValueError('''The accumulator should be called first to initialize the gradients''' ) return [gradient.value() if gradient is not None else gradient for gradient in self._gradients] def __call__( self,__lowerCamelCase ): if not self._gradients: A__ = self.step # Create the step variable. self._gradients.extend( [ tf.Variable( tf.zeros_like(__lowerCamelCase ),trainable=__lowerCamelCase,synchronization=tf.VariableSynchronization.ON_READ,aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,) if gradient is not None else gradient for gradient in gradients ] ) if len(__lowerCamelCase ) != len(self._gradients ): raise ValueError(f"Expected {len(self._gradients )} gradients, but got {len(__lowerCamelCase )}" ) for accum_gradient, gradient in zip(self._gradients,__lowerCamelCase ): if accum_gradient is not None and gradient is not None: accum_gradient.assign_add(__lowerCamelCase ) self._accum_steps.assign_add(1 ) def UpperCamelCase ( self ): if not self._gradients: return self._accum_steps.assign(0 ) for gradient in self._gradients: if gradient is not None: gradient.assign(tf.zeros_like(__lowerCamelCase ) )
39
1
"""simple docstring""" class lowercase : def __init__( self : Union[str, Any] , _lowerCamelCase : str ): """simple docstring""" A_ : Tuple = val A_ : str = None A_ : Any = None def a_ ( self : List[Any] , _lowerCamelCase : str ): """simple docstring""" if self.val: if val < self.val: if self.left is None: A_ : str = Node(__a ) else: self.left.insert(__a ) elif val > self.val: if self.right is None: A_ : int = Node(__a ) else: self.right.insert(__a ) else: A_ : List[Any] = val def lowercase_ ( _UpperCAmelCase , _UpperCAmelCase ): """simple docstring""" if root: inorder(root.left , lowerCAmelCase_ ) res.append(root.val ) inorder(root.right , lowerCAmelCase_ ) def lowercase_ ( _UpperCAmelCase ): """simple docstring""" if len(lowerCAmelCase_ ) == 0: return arr A_ : str = Node(arr[0] ) for i in range(1 , len(lowerCAmelCase_ ) ): root.insert(arr[i] ) # Traverse BST in order. A_ : Optional[Any] = [] inorder(lowerCAmelCase_ , lowerCAmelCase_ ) return res if __name__ == "__main__": print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
167
import unittest from transformers import CamembertTokenizer, CamembertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import is_torch_available from ...test_tokenization_common import TokenizerTesterMixin lowerCamelCase : Any = get_tests_dir('''fixtures/test_sentencepiece.model''') lowerCamelCase : Any = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''') lowerCamelCase : Optional[int] = '''pt''' if is_torch_available() else '''tf''' @require_sentencepiece @require_tokenizers class lowerCAmelCase ( __a , unittest.TestCase ): '''simple docstring''' _A : Dict = CamembertTokenizer _A : Union[str, Any] = CamembertTokenizerFast _A : Union[str, Any] = True _A : Tuple = True def lowerCAmelCase ( self : Optional[int] ) -> Optional[int]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __lowercase : Union[str, Any] = CamembertTokenizer(__a ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" __lowercase : Dict = """<pad>""" __lowercase : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a ) def lowerCAmelCase ( self : Optional[Any] ) -> Tuple: """simple docstring""" __lowercase : int = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" ) self.assertEqual(vocab_keys[1] , """<pad>""" ) self.assertEqual(vocab_keys[-1] , """<mask>""" ) self.assertEqual(len(__a ) , 1004 ) def lowerCAmelCase ( self : str ) -> List[Any]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1005 ) def lowerCAmelCase ( self : Any ) -> Optional[Any]: """simple docstring""" __lowercase : Tuple = CamembertTokenizer(__a ) tokenizer.save_pretrained(self.tmpdirname ) __lowercase : Tuple = CamembertTokenizerFast.from_pretrained(self.tmpdirname ) __lowercase : List[str] = """I was born in 92000, and this is falsé.""" __lowercase : Optional[Any] = tokenizer.encode(__a ) __lowercase : List[Any] = rust_tokenizer.encode(__a ) self.assertListEqual(__a , __a ) __lowercase : Tuple = tokenizer.encode(__a , add_special_tokens=__a ) __lowercase : Union[str, Any] = rust_tokenizer.encode(__a , add_special_tokens=__a ) self.assertListEqual(__a , __a ) # <unk> tokens are not the same for `rust` than for `slow`. # Because spm gives back raw token instead of `unk` in EncodeAsPieces # tokens = tokenizer.tokenize(sequence) __lowercase : Dict = tokenizer.convert_ids_to_tokens(__a ) __lowercase : Any = rust_tokenizer.tokenize(__a ) self.assertListEqual(__a , __a ) def lowerCAmelCase ( self : Any ) -> Dict: """simple docstring""" if not self.test_rust_tokenizer: return __lowercase : List[str] = self.get_tokenizer() __lowercase : Any = self.get_rust_tokenizer() __lowercase : Any = """I was born in 92000, and this is falsé.""" __lowercase : Tuple = tokenizer.tokenize(__a ) __lowercase : Optional[Any] = rust_tokenizer.tokenize(__a ) self.assertListEqual(__a , __a ) __lowercase : Dict = tokenizer.encode(__a , add_special_tokens=__a ) __lowercase : Dict = rust_tokenizer.encode(__a , add_special_tokens=__a ) self.assertListEqual(__a , __a ) __lowercase : Any = self.get_rust_tokenizer() __lowercase : str = tokenizer.encode(__a ) __lowercase : Union[str, Any] = rust_tokenizer.encode(__a ) self.assertListEqual(__a , __a ) @slow def lowerCAmelCase ( self : Optional[int] ) -> List[Any]: """simple docstring""" __lowercase : str = {"""input_ids""": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # camembert is a french model. So we also use french texts. __lowercase : List[str] = [ """Le transformeur est un modèle d'apprentissage profond introduit en 2017, """ """utilisé principalement dans le domaine du traitement automatique des langues (TAL).""", """À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """ """pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """ """telles que la traduction et la synthèse de texte.""", ] self.tokenizer_integration_test_util( expected_encoding=__a , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=__a , )
233
0
"""simple docstring""" from collections import defaultdict from math import gcd def a__ ( lowerCAmelCase__ = 1500000 ): UpperCAmelCase_ = defaultdict(_lowerCAmelCase ) UpperCAmelCase_ = 2 while 2 * euclid_m * (euclid_m + 1) <= limit: for euclid_n in range((euclid_m % 2) + 1 , _lowerCAmelCase , 2 ): if gcd(_lowerCAmelCase , _lowerCAmelCase ) > 1: continue UpperCAmelCase_ = 2 * euclid_m * (euclid_m + euclid_n) for perimeter in range(_lowerCAmelCase , limit + 1 , _lowerCAmelCase ): frequencies[perimeter] += 1 euclid_m += 1 return sum(1 for frequency in frequencies.values() if frequency == 1 ) if __name__ == "__main__": print(F"{solution() = }")
354
"""simple docstring""" import argparse import glob import importlib.util import os import re import black from doc_builder.style_doc import style_docstrings_in_code # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py lowerCamelCase = """src/diffusers""" lowerCamelCase = """.""" # This is to make sure the diffusers module imported is the one in the repo. lowerCamelCase = importlib.util.spec_from_file_location( """diffusers""", os.path.join(DIFFUSERS_PATH, """__init__.py"""), submodule_search_locations=[DIFFUSERS_PATH], ) lowerCamelCase = spec.loader.load_module() def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ): return line.startswith(lowerCAmelCase__ ) or len(lowerCAmelCase__ ) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$" , lowerCAmelCase__ ) is not None def a__ ( lowerCAmelCase__ ): UpperCAmelCase_ = object_name.split("." ) UpperCAmelCase_ = 0 # First let's find the module where our object lives. UpperCAmelCase_ = parts[i] while i < len(lowerCAmelCase__ ) and not os.path.isfile(os.path.join(lowerCAmelCase__ , f"""{module}.py""" ) ): i += 1 if i < len(lowerCAmelCase__ ): UpperCAmelCase_ = os.path.join(lowerCAmelCase__ , parts[i] ) if i >= len(lowerCAmelCase__ ): raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" ) with open(os.path.join(lowerCAmelCase__ , f"""{module}.py""" ) , "r" , encoding="utf-8" , newline="\n" ) as f: UpperCAmelCase_ = f.readlines() # Now let's find the class / func in the code! UpperCAmelCase_ = "" UpperCAmelCase_ = 0 for name in parts[i + 1 :]: while ( line_index < len(lowerCAmelCase__ ) and re.search(rf"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(lowerCAmelCase__ ): raise ValueError(f""" {object_name} does not match any function or class in {module}.""" ) # We found the beginning of the class / func, now let's find the end (when the indent diminishes). UpperCAmelCase_ = line_index while line_index < len(lowerCAmelCase__ ) and _should_continue(lines[line_index] , lowerCAmelCase__ ): line_index += 1 # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 UpperCAmelCase_ = lines[start_index:line_index] return "".join(lowerCAmelCase__ ) lowerCamelCase = re.compile(r"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""") lowerCamelCase = re.compile(r"""^\s*(\S+)->(\S+)(\s+.*|$)""") lowerCamelCase = re.compile(r"""<FILL\s+[^>]*>""") def a__ ( lowerCAmelCase__ ): UpperCAmelCase_ = code.split("\n" ) UpperCAmelCase_ = 0 while idx < len(lowerCAmelCase__ ) and len(lines[idx] ) == 0: idx += 1 if idx < len(lowerCAmelCase__ ): return re.search(r"^(\s*)\S" , lines[idx] ).groups()[0] return "" def a__ ( lowerCAmelCase__ ): UpperCAmelCase_ = len(get_indent(lowerCAmelCase__ ) ) > 0 if has_indent: UpperCAmelCase_ = f"""class Bla:\n{code}""" UpperCAmelCase_ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=lowerCAmelCase__ ) UpperCAmelCase_ = black.format_str(lowerCAmelCase__ , mode=lowerCAmelCase__ ) UpperCAmelCase_ , UpperCAmelCase_ = style_docstrings_in_code(lowerCAmelCase__ ) return result[len("class Bla:\n" ) :] if has_indent else result def a__ ( lowerCAmelCase__ , lowerCAmelCase__=False ): with open(lowerCAmelCase__ , "r" , encoding="utf-8" , newline="\n" ) as f: UpperCAmelCase_ = f.readlines() UpperCAmelCase_ = [] UpperCAmelCase_ = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). while line_index < len(lowerCAmelCase__ ): UpperCAmelCase_ = _re_copy_warning.search(lines[line_index] ) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = search.groups() UpperCAmelCase_ = find_code_in_diffusers(lowerCAmelCase__ ) UpperCAmelCase_ = get_indent(lowerCAmelCase__ ) UpperCAmelCase_ = line_index + 1 if indent == theoretical_indent else line_index + 2 UpperCAmelCase_ = theoretical_indent UpperCAmelCase_ = start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. UpperCAmelCase_ = True while line_index < len(lowerCAmelCase__ ) and should_continue: line_index += 1 if line_index >= len(lowerCAmelCase__ ): break UpperCAmelCase_ = lines[line_index] UpperCAmelCase_ = _should_continue(lowerCAmelCase__ , lowerCAmelCase__ ) and re.search(f"""^{indent}# End copy""" , lowerCAmelCase__ ) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 UpperCAmelCase_ = lines[start_index:line_index] UpperCAmelCase_ = "".join(lowerCAmelCase__ ) # Remove any nested `Copied from` comments to avoid circular copies UpperCAmelCase_ = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(lowerCAmelCase__ ) is None] UpperCAmelCase_ = "\n".join(lowerCAmelCase__ ) # Before comparing, use the `replace_pattern` on the original code. if len(lowerCAmelCase__ ) > 0: UpperCAmelCase_ = replace_pattern.replace("with" , "" ).split("," ) UpperCAmelCase_ = [_re_replace_pattern.search(lowerCAmelCase__ ) for p in patterns] for pattern in patterns: if pattern is None: continue UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = pattern.groups() UpperCAmelCase_ = re.sub(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) if option.strip() == "all-casing": UpperCAmelCase_ = re.sub(obja.lower() , obja.lower() , lowerCAmelCase__ ) UpperCAmelCase_ = re.sub(obja.upper() , obja.upper() , lowerCAmelCase__ ) # Blackify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line UpperCAmelCase_ = blackify(lines[start_index - 1] + theoretical_code ) UpperCAmelCase_ = theoretical_code[len(lines[start_index - 1] ) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index] ) if overwrite: UpperCAmelCase_ = lines[:start_index] + [theoretical_code] + lines[line_index:] UpperCAmelCase_ = start_index + 1 if overwrite and len(lowerCAmelCase__ ) > 0: # Warn the user a file has been modified. print(f"""Detected changes, rewriting {filename}.""" ) with open(lowerCAmelCase__ , "w" , encoding="utf-8" , newline="\n" ) as f: f.writelines(lowerCAmelCase__ ) return diffs def a__ ( lowerCAmelCase__ = False ): UpperCAmelCase_ = glob.glob(os.path.join(lowerCAmelCase__ , "**/*.py" ) , recursive=lowerCAmelCase__ ) UpperCAmelCase_ = [] for filename in all_files: UpperCAmelCase_ = is_copy_consistent(lowerCAmelCase__ , lowerCAmelCase__ ) diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs] if not overwrite and len(lowerCAmelCase__ ) > 0: UpperCAmelCase_ = "\n".join(lowerCAmelCase__ ) raise Exception( "Found the following copy inconsistencies:\n" + diff + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." ) if __name__ == "__main__": lowerCamelCase = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") lowerCamelCase = parser.parse_args() check_copies(args.fix_and_overwrite)
241
0
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SegformerConfig, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase : Tuple = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=False ) -> Union[str, Any]: """simple docstring""" A__ = OrderedDict() for key, value in state_dict.items(): if encoder_only and not key.startswith('''head''' ): A__ = '''segformer.encoder.''' + key if key.startswith('''backbone''' ): A__ = key.replace('''backbone''' , '''segformer.encoder''' ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 A__ = key[key.find('''patch_embed''' ) + len('''patch_embed''' )] A__ = key.replace(f"""patch_embed{idx}""" , f"""patch_embeddings.{int(lowercase_ )-1}""" ) if "norm" in key: A__ = key.replace('''norm''' , '''layer_norm''' ) if "segformer.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 A__ = key[key.find('''segformer.encoder.layer_norm''' ) + len('''segformer.encoder.layer_norm''' )] A__ = key.replace(f"""layer_norm{idx}""" , f"""layer_norm.{int(lowercase_ )-1}""" ) if "layer_norm1" in key: A__ = key.replace('''layer_norm1''' , '''layer_norm_1''' ) if "layer_norm2" in key: A__ = key.replace('''layer_norm2''' , '''layer_norm_2''' ) if "block" in key: # replace for example block1 by block.0 A__ = key[key.find('''block''' ) + len('''block''' )] A__ = key.replace(f"""block{idx}""" , f"""block.{int(lowercase_ )-1}""" ) if "attn.q" in key: A__ = key.replace('''attn.q''' , '''attention.self.query''' ) if "attn.proj" in key: A__ = key.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in key: A__ = key.replace('''attn''' , '''attention.self''' ) if "fc1" in key: A__ = key.replace('''fc1''' , '''dense1''' ) if "fc2" in key: A__ = key.replace('''fc2''' , '''dense2''' ) if "linear_pred" in key: A__ = key.replace('''linear_pred''' , '''classifier''' ) if "linear_fuse" in key: A__ = key.replace('''linear_fuse.conv''' , '''linear_fuse''' ) A__ = key.replace('''linear_fuse.bn''' , '''batch_norm''' ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 A__ = key[key.find('''linear_c''' ) + len('''linear_c''' )] A__ = key.replace(f"""linear_c{idx}""" , f"""linear_c.{int(lowercase_ )-1}""" ) if key.startswith('''head''' ): A__ = key.replace('''head''' , '''classifier''' ) A__ = value return new_state_dict def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str: """simple docstring""" for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) A__ = state_dict.pop(f"""segformer.encoder.block.{i}.{j}.attention.self.kv.weight""" ) A__ = state_dict.pop(f"""segformer.encoder.block.{i}.{j}.attention.self.kv.bias""" ) # next, add keys and values (in that order) to the state dict A__ = kv_weight[ : config.hidden_sizes[i], : ] A__ = kv_bias[: config.hidden_sizes[i]] A__ = kv_weight[ config.hidden_sizes[i] :, : ] A__ = kv_bias[ config.hidden_sizes[i] : ] def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]: """simple docstring""" A__ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' A__ = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw ) return image @torch.no_grad() def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> List[str]: """simple docstring""" A__ = SegformerConfig() A__ = False # set attributes based on model_name A__ = '''huggingface/label-files''' if "segformer" in model_name: A__ = model_name[len('''segformer.''' ) : len('''segformer.''' ) + 2] if "ade" in model_name: A__ = 150 A__ = '''ade20k-id2label.json''' A__ = (1, 150, 128, 128) elif "city" in model_name: A__ = 19 A__ = '''cityscapes-id2label.json''' A__ = (1, 19, 128, 128) else: raise ValueError(f"""Model {model_name} not supported""" ) elif "mit" in model_name: A__ = True A__ = model_name[4:6] A__ = 1_000 A__ = '''imagenet-1k-id2label.json''' A__ = (1, 1_000) else: raise ValueError(f"""Model {model_name} not supported""" ) # set config attributes A__ = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type='''dataset''' ) , '''r''' ) ) A__ = {int(lowercase_ ): v for k, v in idalabel.items()} A__ = idalabel A__ = {v: k for k, v in idalabel.items()} if size == "b0": pass elif size == "b1": A__ = [64, 128, 320, 512] A__ = 256 elif size == "b2": A__ = [64, 128, 320, 512] A__ = 768 A__ = [3, 4, 6, 3] elif size == "b3": A__ = [64, 128, 320, 512] A__ = 768 A__ = [3, 4, 18, 3] elif size == "b4": A__ = [64, 128, 320, 512] A__ = 768 A__ = [3, 8, 27, 3] elif size == "b5": A__ = [64, 128, 320, 512] A__ = 768 A__ = [3, 6, 40, 3] else: raise ValueError(f"""Size {size} not supported""" ) # load image processor (only resize + normalize) A__ = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=lowercase_ , align=lowercase_ , do_random_crop=lowercase_ ) # prepare image A__ = prepare_img() A__ = image_processor(images=lowercase_ , return_tensors='''pt''' ).pixel_values logger.info(f"""Converting model {model_name}...""" ) # load original state dict if encoder_only: A__ = torch.load(lowercase_ , map_location=torch.device('''cpu''' ) ) else: A__ = torch.load(lowercase_ , map_location=torch.device('''cpu''' ) )['''state_dict'''] # rename keys A__ = rename_keys(lowercase_ , encoder_only=lowercase_ ) if not encoder_only: del state_dict["decode_head.conv_seg.weight"] del state_dict["decode_head.conv_seg.bias"] # key and value matrices need special treatment read_in_k_v(lowercase_ , lowercase_ ) # create HuggingFace model and load state dict if encoder_only: A__ = False A__ = SegformerForImageClassification(lowercase_ ) else: A__ = SegformerForSemanticSegmentation(lowercase_ ) model.load_state_dict(lowercase_ ) model.eval() # forward pass A__ = model(lowercase_ ) A__ = outputs.logits # set expected_slice based on model name # ADE20k checkpoints if model_name == "segformer.b0.512x512.ade.160k": A__ = torch.tensor( [ [[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]], [[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]], [[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]], ] ) elif model_name == "segformer.b1.512x512.ade.160k": A__ = torch.tensor( [ [[-7.58_20, -8.72_31, -8.32_15], [-8.06_00, -10.35_29, -10.03_04], [-7.52_08, -9.41_03, -9.62_39]], [[-12.69_18, -13.89_94, -13.71_37], [-13.31_96, -15.75_23, -15.47_89], [-12.93_43, -14.87_57, -14.96_89]], [[-11.19_11, -11.94_21, -11.32_43], [-11.33_42, -13.68_39, -13.35_81], [-10.39_09, -12.18_32, -12.48_58]], ] ) elif model_name == "segformer.b2.512x512.ade.160k": A__ = torch.tensor( [ [[-11.81_73, -14.38_50, -16.31_28], [-14.56_48, -16.58_04, -18.65_68], [-14.72_23, -15.73_87, -18.42_18]], [[-15.72_90, -17.91_71, -19.44_23], [-18.31_05, -19.94_48, -21.46_61], [-17.92_96, -18.64_97, -20.79_10]], [[-15.07_83, -17.03_36, -18.27_89], [-16.87_71, -18.68_70, -20.16_12], [-16.24_54, -17.14_26, -19.50_55]], ] ) elif model_name == "segformer.b3.512x512.ade.160k": A__ = torch.tensor( [ [[-9.08_78, -10.20_81, -10.18_91], [-9.31_44, -10.79_41, -10.98_43], [-9.22_94, -10.38_55, -10.57_04]], [[-12.23_16, -13.90_68, -13.61_02], [-12.91_61, -14.37_02, -14.32_35], [-12.52_33, -13.71_74, -13.79_32]], [[-14.62_75, -15.24_90, -14.97_27], [-14.34_00, -15.96_87, -16.28_27], [-14.14_84, -15.40_33, -15.89_37]], ] ) elif model_name == "segformer.b4.512x512.ade.160k": A__ = torch.tensor( [ [[-12.31_44, -13.24_47, -14.08_02], [-13.36_14, -14.58_16, -15.61_17], [-13.33_40, -14.44_33, -16.22_19]], [[-19.27_81, -20.41_28, -20.75_06], [-20.61_53, -21.65_66, -22.09_98], [-19.98_00, -21.04_30, -22.14_94]], [[-18.87_39, -19.78_04, -21.18_34], [-20.12_33, -21.67_65, -23.29_44], [-20.03_15, -21.26_41, -23.69_44]], ] ) elif model_name == "segformer.b5.640x640.ade.160k": A__ = torch.tensor( [ [[-9.55_24, -12.08_35, -11.73_48], [-10.52_29, -13.64_46, -14.56_62], [-9.58_42, -12.88_51, -13.94_14]], [[-15.34_32, -17.53_23, -17.08_18], [-16.33_30, -18.92_55, -19.21_01], [-15.13_40, -17.78_48, -18.39_71]], [[-12.60_72, -14.94_86, -14.66_31], [-13.76_29, -17.09_07, -17.77_45], [-12.78_99, -16.16_95, -17.16_71]], ] ) # Cityscapes checkpoints elif model_name == "segformer.b0.1024x1024.city.160k": A__ = torch.tensor( [ [[-11.92_95, -13.40_57, -14.81_06], [-13.34_31, -14.81_79, -15.37_81], [-14.28_36, -15.59_42, -16.15_88]], [[-11.49_06, -12.80_67, -13.65_64], [-13.11_89, -14.05_00, -14.15_43], [-13.87_48, -14.51_36, -14.87_89]], [[0.53_74, 0.10_67, -0.47_42], [0.11_41, -0.22_55, -0.70_99], [-0.30_00, -0.59_24, -1.31_05]], ] ) elif model_name == "segformer.b0.512x1024.city.160k": A__ = torch.tensor( [ [[-7.82_17, -9.87_67, -10.17_17], [-9.44_38, -10.90_58, -11.40_47], [-9.79_39, -12.34_95, -12.10_79]], [[-7.15_14, -9.53_36, -10.08_60], [-9.77_76, -11.68_22, -11.84_39], [-10.14_11, -12.76_55, -12.89_72]], [[0.30_21, 0.08_05, -0.23_10], [-0.03_28, -0.16_05, -0.27_14], [-0.14_08, -0.54_77, -0.69_76]], ] ) elif model_name == "segformer.b0.640x1280.city.160k": A__ = torch.tensor( [ [ [-1.1_3_7_2E0_1, -1.2_7_8_7E0_1, -1.3_4_7_7E0_1], [-1.2_5_3_6E0_1, -1.4_1_9_4E0_1, -1.4_4_0_9E0_1], [-1.3_2_1_7E0_1, -1.4_8_8_8E0_1, -1.5_3_2_7E0_1], ], [ [-1.4_7_9_1E0_1, -1.7_1_2_2E0_1, -1.8_2_7_7E0_1], [-1.7_1_6_3E0_1, -1.9_1_9_2E0_1, -1.9_5_3_3E0_1], [-1.7_8_9_7E0_1, -1.9_9_9_1E0_1, -2.0_3_1_5E0_1], ], [ [7.6_7_2_3E-0_1, 4.1_9_2_1E-0_1, -7.7_8_7_8E-0_2], [4.7_7_7_2E-0_1, 9.5_5_5_7E-0_3, -2.8_0_8_2E-0_1], [3.6_0_3_2E-0_1, -2.4_8_2_6E-0_1, -5.1_1_6_8E-0_1], ], ] ) elif model_name == "segformer.b0.768x768.city.160k": A__ = torch.tensor( [ [[-9.49_59, -11.30_87, -11.74_79], [-11.00_25, -12.65_40, -12.33_19], [-11.40_64, -13.04_87, -12.99_05]], [[-9.89_05, -11.30_84, -12.08_54], [-11.17_26, -12.76_98, -12.95_83], [-11.59_85, -13.32_78, -14.17_74]], [[0.22_13, 0.01_92, -0.24_66], [-0.17_31, -0.42_13, -0.48_74], [-0.31_26, -0.65_41, -1.13_89]], ] ) elif model_name == "segformer.b1.1024x1024.city.160k": A__ = torch.tensor( [ [[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]], [[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]], [[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]], ] ) elif model_name == "segformer.b2.1024x1024.city.160k": A__ = torch.tensor( [ [[-16.09_76, -16.48_56, -17.39_62], [-16.62_34, -19.03_42, -19.76_85], [-16.09_00, -18.06_61, -19.11_80]], [[-18.47_50, -18.84_88, -19.50_74], [-19.40_30, -22.15_70, -22.59_77], [-19.11_91, -20.84_86, -22.37_83]], [[-4.51_78, -5.50_37, -6.51_09], [-5.08_84, -7.21_74, -8.03_34], [-4.41_56, -5.81_17, -7.29_70]], ] ) elif model_name == "segformer.b3.1024x1024.city.160k": A__ = torch.tensor( [ [[-14.20_81, -14.47_32, -14.19_77], [-14.58_67, -16.44_23, -16.63_56], [-13.44_41, -14.96_85, -16.86_96]], [[-14.45_76, -14.70_73, -15.04_51], [-15.08_16, -17.62_37, -17.98_73], [-14.42_13, -16.01_99, -18.59_92]], [[-4.73_49, -4.95_88, -5.09_66], [-4.32_10, -6.93_25, -7.25_91], [-3.43_12, -4.74_84, -7.19_17]], ] ) elif model_name == "segformer.b4.1024x1024.city.160k": A__ = torch.tensor( [ [[-11.77_37, -11.95_26, -11.32_73], [-13.66_92, -14.45_74, -13.88_78], [-13.89_37, -14.69_24, -15.93_45]], [[-14.67_06, -14.53_30, -14.13_06], [-16.15_02, -16.81_80, -16.42_69], [-16.83_38, -17.89_39, -20.17_46]], [[1.04_91, 0.82_89, 1.03_10], [1.10_44, 0.52_19, 0.80_55], [1.08_99, 0.69_26, 0.55_90]], ] ) elif model_name == "segformer.b5.1024x1024.city.160k": A__ = torch.tensor( [ [[-12.56_41, -13.47_77, -13.06_84], [-13.95_87, -15.89_83, -16.65_57], [-13.31_09, -15.73_50, -16.31_41]], [[-14.70_74, -15.43_52, -14.59_44], [-16.63_53, -18.16_63, -18.61_20], [-15.17_02, -18.03_29, -18.15_47]], [[-1.79_90, -2.09_51, -1.77_84], [-2.63_97, -3.82_45, -3.96_86], [-1.52_64, -2.81_26, -2.93_16]], ] ) else: A__ = logits.argmax(-1 ).item() print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] ) # verify logits if not encoder_only: assert logits.shape == expected_shape assert torch.allclose(logits[0, :3, :3, :3] , lowercase_ , atol=1E-2 ) # finally, save model and image processor logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(lowercase_ ).mkdir(exist_ok=lowercase_ ) model.save_pretrained(lowercase_ ) image_processor.save_pretrained(lowercase_ ) if __name__ == "__main__": _lowerCamelCase : Any = argparse.ArgumentParser() parser.add_argument( """--model_name""", default="""segformer.b0.512x512.ade.160k""", type=str, help="""Name of the model you'd like to convert.""", ) parser.add_argument( """--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file).""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) _lowerCamelCase : Union[str, Any] = parser.parse_args() convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
14
import argparse import torch from safetensors.torch import load_file from diffusers import StableDiffusionPipeline def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> List[Any]: """simple docstring""" A__ = StableDiffusionPipeline.from_pretrained(lowercase_ , torch_dtype=torch.floataa ) # load LoRA weight from .safetensors A__ = load_file(lowercase_ ) A__ = [] # directly update weight in diffusers model for key in state_dict: # it is suggested to print out the key, it usually will be something like below # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" # as we have set the alpha beforehand, so just skip if ".alpha" in key or key in visited: continue if "text" in key: A__ = key.split('''.''' )[0].split(LORA_PREFIX_TEXT_ENCODER + '''_''' )[-1].split('''_''' ) A__ = pipeline.text_encoder else: A__ = key.split('''.''' )[0].split(LORA_PREFIX_UNET + '''_''' )[-1].split('''_''' ) A__ = pipeline.unet # find the target layer A__ = layer_infos.pop(0 ) while len(lowercase_ ) > -1: try: A__ = curr_layer.__getattr__(lowercase_ ) if len(lowercase_ ) > 0: A__ = layer_infos.pop(0 ) elif len(lowercase_ ) == 0: break except Exception: if len(lowercase_ ) > 0: temp_name += "_" + layer_infos.pop(0 ) else: A__ = layer_infos.pop(0 ) A__ = [] if "lora_down" in key: pair_keys.append(key.replace('''lora_down''' , '''lora_up''' ) ) pair_keys.append(lowercase_ ) else: pair_keys.append(lowercase_ ) pair_keys.append(key.replace('''lora_up''' , '''lora_down''' ) ) # update weight if len(state_dict[pair_keys[0]].shape ) == 4: A__ = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) A__ = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(lowercase_ , lowercase_ ).unsqueeze(2 ).unsqueeze(3 ) else: A__ = state_dict[pair_keys[0]].to(torch.floataa ) A__ = state_dict[pair_keys[1]].to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(lowercase_ , lowercase_ ) # update visited list for item in pair_keys: visited.append(lowercase_ ) return pipeline if __name__ == "__main__": _lowerCamelCase : Tuple = argparse.ArgumentParser() parser.add_argument( """--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format.""" ) parser.add_argument( """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert.""" ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors""" ) parser.add_argument( """--lora_prefix_text_encoder""", default="""lora_te""", type=str, help="""The prefix of text encoder weight in safetensors""", ) parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""") parser.add_argument( """--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not.""" ) parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""") _lowerCamelCase : Tuple = parser.parse_args() _lowerCamelCase : List[Any] = args.base_model_path _lowerCamelCase : Optional[int] = args.checkpoint_path _lowerCamelCase : Dict = args.dump_path _lowerCamelCase : Optional[Any] = args.lora_prefix_unet _lowerCamelCase : Optional[int] = args.lora_prefix_text_encoder _lowerCamelCase : List[Any] = args.alpha _lowerCamelCase : int = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha) _lowerCamelCase : Tuple = pipe.to(args.device) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
14
1
'''simple docstring''' from collections import defaultdict def UpperCAmelCase ( a_ ) -> int: """simple docstring""" A_ : Optional[Any] = 1 A_ : List[Any] = True for v in tree[start]: if v not in visited: ret += dfs(a_ ) if ret % 2 == 0: cuts.append(a_ ) return ret def UpperCAmelCase ( ) -> int: """simple docstring""" dfs(1 ) if __name__ == "__main__": UpperCamelCase__ : Optional[int] = 10, 9 UpperCamelCase__ : Any = defaultdict(list) UpperCamelCase__ : dict[int, bool] = {} UpperCamelCase__ : list[int] = [] UpperCamelCase__ : Any = 0 UpperCamelCase__ : Tuple = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)] for u, v in edges: tree[u].append(v) tree[v].append(u) even_tree() print(len(cuts) - 1)
362
'''simple docstring''' UpperCamelCase__ : int = {str(digit): digit**5 for digit in range(10)} def UpperCAmelCase ( a_ ) -> int: """simple docstring""" return sum(DIGITS_FIFTH_POWER[digit] for digit in str(a_ ) ) def UpperCAmelCase ( ) -> int: """simple docstring""" return sum( number for number in range(1_0_0_0 , 1_0_0_0_0_0_0 ) if number == digits_fifth_powers_sum(a_ ) ) if __name__ == "__main__": print(solution())
164
0
import logging import os import quant_trainer import torch from torch.utils.data import DataLoader from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput __lowerCamelCase : Optional[int] = logging.getLogger(__name__) if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class a__ ( A__ ): def __init__( self : Optional[int],*_A : Tuple,_A : List[Any]=None,_A : Tuple=None,_A : Optional[int]=None,**_A : Union[str, Any] ): """simple docstring""" super().__init__(*_A,**_A ) SCREAMING_SNAKE_CASE_ : Tuple = eval_examples SCREAMING_SNAKE_CASE_ : str = post_process_function SCREAMING_SNAKE_CASE_ : Dict = quant_trainer_args SCREAMING_SNAKE_CASE_ : Optional[int] = 128 # default number of calibration samples def __UpperCamelCase ( self : Optional[int],_A : Optional[Any]=None ): """simple docstring""" if calib_dataset is None and self.calib_dataset is None: raise ValueError("Trainer: calibration requires an calib_dataset." ) SCREAMING_SNAKE_CASE_ : Optional[Any] = calib_dataset if calib_dataset is not None else self.calib_dataset SCREAMING_SNAKE_CASE_ : Optional[Any] = self._remove_unused_columns(_A,description="Calibration" ) return DataLoader( _A,batch_size=self.args.eval_batch_size,collate_fn=self.data_collator,drop_last=self.args.dataloader_drop_last,num_workers=self.args.dataloader_num_workers,pin_memory=self.args.dataloader_pin_memory,shuffle=_A,) def __UpperCamelCase ( self : List[Any],_A : Optional[Any]=None ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = self.train_dataset if calib_dataset is None else calib_dataset SCREAMING_SNAKE_CASE_ : Dict = self.get_calib_dataloader(_A ) SCREAMING_SNAKE_CASE_ : List[Any] = self.model quant_trainer.configure_model(_A,self.quant_trainer_args,calib=_A ) model.eval() quant_trainer.enable_calibration(_A ) logger.info("***** Running calibration *****" ) logger.info(F' Num examples = {self.calib_num}' ) logger.info(F' Batch size = {calib_dataloader.batch_size}' ) for step, inputs in enumerate(_A ): # Prediction step SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = self.prediction_step(_A,_A,prediction_loss_only=_A ) if (step + 1) * calib_dataloader.batch_size >= self.calib_num: break quant_trainer.finish_calibration(_A,self.quant_trainer_args ) SCREAMING_SNAKE_CASE_ : Any = model def __UpperCamelCase ( self : Optional[Any],_A : Dict=None,_A : List[str]=None,_A : Union[str, Any]=None,_A : str = "eval" ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str = self.eval_dataset if eval_dataset is None else eval_dataset SCREAMING_SNAKE_CASE_ : Any = self.get_eval_dataloader(_A ) SCREAMING_SNAKE_CASE_ : Optional[Any] = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. SCREAMING_SNAKE_CASE_ : List[str] = self.compute_metrics SCREAMING_SNAKE_CASE_ : List[Any] = None SCREAMING_SNAKE_CASE_ : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: SCREAMING_SNAKE_CASE_ : str = eval_loop( _A,description="Evaluation",prediction_loss_only=True if compute_metrics is None else None,ignore_keys=_A,) finally: SCREAMING_SNAKE_CASE_ : List[Any] = compute_metrics if self.post_process_function is not None and self.compute_metrics is not None: SCREAMING_SNAKE_CASE_ : Any = self.post_process_function(_A,_A,output.predictions ) SCREAMING_SNAKE_CASE_ : Any = self.compute_metrics(_A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'{metric_key_prefix}_' ): SCREAMING_SNAKE_CASE_ : str = metrics.pop(_A ) self.log(_A ) else: SCREAMING_SNAKE_CASE_ : Union[str, Any] = {} if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) SCREAMING_SNAKE_CASE_ : List[Any] = self.callback_handler.on_evaluate(self.args,self.state,self.control,_A ) return metrics def __UpperCamelCase ( self : Any,_A : Optional[Any],_A : List[Any],_A : str=None,_A : str = "test" ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = self.get_test_dataloader(_A ) # Temporarily disable metric computation, we will do it in the loop here. SCREAMING_SNAKE_CASE_ : Dict = self.compute_metrics SCREAMING_SNAKE_CASE_ : Tuple = None SCREAMING_SNAKE_CASE_ : List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: SCREAMING_SNAKE_CASE_ : Any = eval_loop( _A,description="Prediction",prediction_loss_only=True if compute_metrics is None else None,ignore_keys=_A,) finally: SCREAMING_SNAKE_CASE_ : List[str] = compute_metrics if self.post_process_function is None or self.compute_metrics is None: return output SCREAMING_SNAKE_CASE_ : List[str] = self.post_process_function(_A,_A,output.predictions,"predict" ) SCREAMING_SNAKE_CASE_ : int = self.compute_metrics(_A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'{metric_key_prefix}_' ): SCREAMING_SNAKE_CASE_ : Union[str, Any] = metrics.pop(_A ) return PredictionOutput(predictions=predictions.predictions,label_ids=predictions.label_ids,metrics=_A ) def __UpperCamelCase ( self : Optional[int],_A : Any="./" ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = self.eval_dataset SCREAMING_SNAKE_CASE_ : List[Any] = self.get_eval_dataloader(_A ) SCREAMING_SNAKE_CASE_ : int = next(iter(_A ) ) # saving device - to make it consistent SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.device("cuda" if torch.cuda.is_available() else "cpu" ) # convert to tuple SCREAMING_SNAKE_CASE_ : List[Any] = tuple(v.to(_A ) for k, v in batch.items() ) logger.info("Converting model to be onnx compatible" ) from pytorch_quantization.nn import TensorQuantizer SCREAMING_SNAKE_CASE_ : Dict = True SCREAMING_SNAKE_CASE_ : int = self.model.to(_A ) model.eval() model.float() SCREAMING_SNAKE_CASE_ : Any = model.module if hasattr(_A,"module" ) else model quant_trainer.configure_model(_A,self.quant_trainer_args ) SCREAMING_SNAKE_CASE_ : int = os.path.join(_A,"model.onnx" ) logger.info(F'exporting model to {output_model_file}' ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = {0: "batch_size", 1: "seq_len"} torch.onnx.export( _A,_A,_A,export_params=_A,opset_version=13,do_constant_folding=_A,input_names=["input_ids", "attention_mask", "token_type_ids"],output_names=["output_start_logits", "output_end_logits"],dynamic_axes={ "input_ids": axes, "attention_mask": axes, "token_type_ids": axes, "output_start_logits": axes, "output_end_logits": axes, },verbose=_A,) logger.info("onnx export finished" )
18
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __lowerCamelCase : Union[str, Any] = { '''configuration_chinese_clip''': [ '''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ChineseCLIPConfig''', '''ChineseCLIPOnnxConfig''', '''ChineseCLIPTextConfig''', '''ChineseCLIPVisionConfig''', ], '''processing_chinese_clip''': ['''ChineseCLIPProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Any = ['''ChineseCLIPFeatureExtractor'''] __lowerCamelCase : Optional[int] = ['''ChineseCLIPImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : int = [ '''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ChineseCLIPModel''', '''ChineseCLIPPreTrainedModel''', '''ChineseCLIPTextModel''', '''ChineseCLIPVisionModel''', ] if TYPE_CHECKING: from .configuration_chinese_clip import ( CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, ChineseCLIPConfig, ChineseCLIPOnnxConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig, ) from .processing_chinese_clip import ChineseCLIPProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_chinese_clip import ( CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, ChineseCLIPModel, ChineseCLIPPreTrainedModel, ChineseCLIPTextModel, ChineseCLIPVisionModel, ) else: import sys __lowerCamelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
18
1
import argparse import json import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification def __lowerCamelCase ( lowerCamelCase__ : Tuple ): '''simple docstring''' lowerCamelCase = SwinConfig() lowerCamelCase = swin_name.split("""_""" ) lowerCamelCase = name_split[1] lowerCamelCase = int(name_split[4] ) lowerCamelCase = int(name_split[3][-1] ) if model_size == "tiny": lowerCamelCase = 96 lowerCamelCase = (2, 2, 6, 2) lowerCamelCase = (3, 6, 12, 24) elif model_size == "small": lowerCamelCase = 96 lowerCamelCase = (2, 2, 18, 2) lowerCamelCase = (3, 6, 12, 24) elif model_size == "base": lowerCamelCase = 128 lowerCamelCase = (2, 2, 18, 2) lowerCamelCase = (4, 8, 16, 32) else: lowerCamelCase = 192 lowerCamelCase = (2, 2, 18, 2) lowerCamelCase = (6, 12, 24, 48) if "in22k" in swin_name: lowerCamelCase = 21841 else: lowerCamelCase = 1000 lowerCamelCase = """huggingface/label-files""" lowerCamelCase = """imagenet-1k-id2label.json""" lowerCamelCase = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="""dataset""" ) , """r""" ) ) lowerCamelCase = {int(lowerCamelCase__ ): v for k, v in idalabel.items()} lowerCamelCase = idalabel lowerCamelCase = {v: k for k, v in idalabel.items()} lowerCamelCase = img_size lowerCamelCase = num_classes lowerCamelCase = embed_dim lowerCamelCase = depths lowerCamelCase = num_heads lowerCamelCase = window_size return config def __lowerCamelCase ( lowerCamelCase__ : List[str] ): '''simple docstring''' if "patch_embed.proj" in name: lowerCamelCase = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: lowerCamelCase = name.replace("""patch_embed.norm""" , """embeddings.norm""" ) if "layers" in name: lowerCamelCase = """encoder.""" + name if "attn.proj" in name: lowerCamelCase = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: lowerCamelCase = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: lowerCamelCase = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: lowerCamelCase = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: lowerCamelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: lowerCamelCase = name.replace("""mlp.fc2""" , """output.dense""" ) if name == "norm.weight": lowerCamelCase = """layernorm.weight""" if name == "norm.bias": lowerCamelCase = """layernorm.bias""" if "head" in name: lowerCamelCase = name.replace("""head""" , """classifier""" ) else: lowerCamelCase = """swin.""" + name return name def __lowerCamelCase ( lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any] ): '''simple docstring''' for key in orig_state_dict.copy().keys(): lowerCamelCase = orig_state_dict.pop(lowerCamelCase__ ) if "mask" in key: continue elif "qkv" in key: lowerCamelCase = key.split(""".""" ) lowerCamelCase = int(key_split[1] ) lowerCamelCase = int(key_split[3] ) lowerCamelCase = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: lowerCamelCase = val[:dim, :] lowerCamelCase = val[ dim : dim * 2, : ] lowerCamelCase = val[-dim:, :] else: lowerCamelCase = val[ :dim ] lowerCamelCase = val[ dim : dim * 2 ] lowerCamelCase = val[ -dim: ] else: lowerCamelCase = val return orig_state_dict def __lowerCamelCase ( lowerCamelCase__ : Tuple , lowerCamelCase__ : str ): '''simple docstring''' lowerCamelCase = timm.create_model(lowerCamelCase__ , pretrained=lowerCamelCase__ ) timm_model.eval() lowerCamelCase = get_swin_config(lowerCamelCase__ ) lowerCamelCase = SwinForImageClassification(lowerCamelCase__ ) model.eval() lowerCamelCase = convert_state_dict(timm_model.state_dict() , lowerCamelCase__ ) model.load_state_dict(lowerCamelCase__ ) lowerCamelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCamelCase = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swin_name.replace("""_""" , """-""" ) ) ) lowerCamelCase = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ) lowerCamelCase = image_processor(images=lowerCamelCase__ , return_tensors="""pt""" ) lowerCamelCase = timm_model(inputs["""pixel_values"""] ) lowerCamelCase = model(**lowerCamelCase__ ).logits assert torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-3 ) print(f'Saving model {swin_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(lowerCamelCase__ ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(lowerCamelCase__ ) if __name__ == "__main__": UpperCAmelCase : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( "--swin_name", default="swin_tiny_patch4_window7_224", type=str, help="Name of the Swin timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) UpperCAmelCase : Tuple = parser.parse_args() convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
66
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : List[str] = logging.get_logger(__name__) UpperCAmelCase : List[Any] = { "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json", # See all AltCLIP models at https://huggingface.co/models?filter=altclip } class __lowercase ( a_ ): """simple docstring""" UpperCamelCase : Any = "altclip_text_model" def __init__( self , A=25_00_02 , A=10_24 , A=24 , A=16 , A=40_96 , A="gelu" , A=0.1 , A=0.1 , A=5_14 , A=1 , A=0.02 , A=0.02 , A=1e-0_5 , A=1 , A=0 , A=2 , A="absolute" , A=True , A=7_68 , **A , ) -> int: '''simple docstring''' super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A ) lowerCamelCase = vocab_size lowerCamelCase = hidden_size lowerCamelCase = num_hidden_layers lowerCamelCase = num_attention_heads lowerCamelCase = hidden_act lowerCamelCase = intermediate_size lowerCamelCase = hidden_dropout_prob lowerCamelCase = attention_probs_dropout_prob lowerCamelCase = max_position_embeddings lowerCamelCase = type_vocab_size lowerCamelCase = initializer_range lowerCamelCase = initializer_factor lowerCamelCase = layer_norm_eps lowerCamelCase = position_embedding_type lowerCamelCase = use_cache lowerCamelCase = project_dim class __lowercase ( a_ ): """simple docstring""" UpperCamelCase : Dict = "altclip_vision_model" def __init__( self , A=7_68 , A=30_72 , A=5_12 , A=12 , A=12 , A=3 , A=2_24 , A=32 , A="quick_gelu" , A=1e-5 , A=0.0 , A=0.02 , A=1.0 , **A , ) -> Dict: '''simple docstring''' super().__init__(**A ) lowerCamelCase = hidden_size lowerCamelCase = intermediate_size lowerCamelCase = projection_dim lowerCamelCase = num_hidden_layers lowerCamelCase = num_attention_heads lowerCamelCase = num_channels lowerCamelCase = patch_size lowerCamelCase = image_size lowerCamelCase = initializer_range lowerCamelCase = initializer_factor lowerCamelCase = attention_dropout lowerCamelCase = layer_norm_eps lowerCamelCase = hidden_act @classmethod def __A ( cls , A , **A ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(A ) lowerCamelCase , lowerCamelCase = cls.get_config_dict(A , **A ) # get the vision config dict if we are loading from AltCLIPConfig if config_dict.get("""model_type""" ) == "altclip": lowerCamelCase = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(A , **A ) class __lowercase ( a_ ): """simple docstring""" UpperCamelCase : Optional[Any] = "altclip" UpperCamelCase : Optional[Any] = True def __init__( self , A=None , A=None , A=7_68 , A=2.6592 , **A ) -> Dict: '''simple docstring''' lowerCamelCase = kwargs.pop("""text_config_dict""" , A ) lowerCamelCase = kwargs.pop("""vision_config_dict""" , A ) super().__init__(**A ) # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`. if text_config_dict is not None: if text_config is None: lowerCamelCase = {} # This is the complete result when using `text_config_dict`. lowerCamelCase = AltCLIPTextConfig(**A ).to_dict() # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different. for key, value in _text_config_dict.items(): if key in text_config and value != text_config[key] and key not in ["transformers_version"]: # If specified in `text_config_dict` if key in text_config_dict: lowerCamelCase = ( F'`{key}` is found in both `text_config_dict` and `text_config` but with different values. ' F'The value `text_config_dict["{key}"]` will be used instead.' ) # If inferred from default argument values (just to be super careful) else: lowerCamelCase = ( F'`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The ' F'value `text_config["{key}"]` will be overriden.' ) logger.warning(A ) # Update all values in `text_config` with the ones in `_text_config_dict`. text_config.update(_text_config_dict ) if vision_config_dict is not None: if vision_config is None: lowerCamelCase = {} # This is the complete result when using `vision_config_dict`. lowerCamelCase = AltCLIPVisionConfig(**A ).to_dict() # convert keys to string instead of integer if "id2label" in _vision_config_dict: lowerCamelCase = { str(A ): value for key, value in _vision_config_dict["""id2label"""].items() } # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different. for key, value in _vision_config_dict.items(): if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]: # If specified in `vision_config_dict` if key in vision_config_dict: lowerCamelCase = ( F'`{key}` is found in both `vision_config_dict` and `vision_config` but with different ' F'values. The value `vision_config_dict["{key}"]` will be used instead.' ) # If inferred from default argument values (just to be super careful) else: lowerCamelCase = ( F'`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. ' F'The value `vision_config["{key}"]` will be overriden.' ) logger.warning(A ) # Update all values in `vision_config` with the ones in `_vision_config_dict`. vision_config.update(_vision_config_dict ) if text_config is None: lowerCamelCase = {} logger.info("""`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.""" ) if vision_config is None: lowerCamelCase = {} logger.info("""`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.""" ) lowerCamelCase = AltCLIPTextConfig(**A ) lowerCamelCase = AltCLIPVisionConfig(**A ) lowerCamelCase = projection_dim lowerCamelCase = logit_scale_init_value lowerCamelCase = 1.0 @classmethod def __A ( cls , A , A , **A ) -> Dict: '''simple docstring''' return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **A ) def __A ( self ) -> Union[str, Any]: '''simple docstring''' lowerCamelCase = copy.deepcopy(self.__dict__ ) lowerCamelCase = self.text_config.to_dict() lowerCamelCase = self.vision_config.to_dict() lowerCamelCase = self.__class__.model_type return output
66
1
import builtins import sys from ...utils.imports import _is_package_available from . import cursor, input from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor from .keymap import KEYMAP __A =False try: __A =_is_package_available('''google.colab''') except ModuleNotFoundError: pass @input.register class _SCREAMING_SNAKE_CASE : def __init__( self , lowercase = None , lowercase = [] ) -> Optional[int]: lowerCamelCase_ = 0 lowerCamelCase_ = choices lowerCamelCase_ = prompt if sys.platform == "win32": lowerCamelCase_ = "*" else: lowerCamelCase_ = "➔ " def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = "" ) -> int: if sys.platform != "win32": writeColor(self.choices[index] , 32 , lowercase ) else: forceWrite(self.choices[index] , lowercase ) def SCREAMING_SNAKE_CASE_( self , lowercase ) -> List[str]: if index == self.position: forceWrite(f' {self.arrow_char} ' ) self.write_choice(lowercase ) else: forceWrite(f' {self.choices[index]}' ) reset_cursor() def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = 1 ) -> List[Any]: lowerCamelCase_ = self.position if direction == Direction.DOWN: if self.position + 1 >= len(self.choices ): return self.position += num_spaces else: if self.position - 1 < 0: return self.position -= num_spaces clear_line() self.print_choice(lowercase ) move_cursor(lowercase , direction.name ) self.print_choice(self.position ) @input.mark(KEYMAP["up"] ) def SCREAMING_SNAKE_CASE_( self ) -> int: self.move_direction(Direction.UP ) @input.mark(KEYMAP["down"] ) def SCREAMING_SNAKE_CASE_( self ) -> Any: self.move_direction(Direction.DOWN ) @input.mark(KEYMAP["newline"] ) def SCREAMING_SNAKE_CASE_( self ) -> List[str]: move_cursor(len(self.choices ) - self.position , "DOWN" ) return self.position @input.mark(KEYMAP["interrupt"] ) def SCREAMING_SNAKE_CASE_( self ) -> List[str]: move_cursor(len(self.choices ) - self.position , "DOWN" ) raise KeyboardInterrupt @input.mark_multiple(*[KEYMAP[str(lowercase )] for number in range(10 )] ) def SCREAMING_SNAKE_CASE_( self ) -> List[str]: lowerCamelCase_ = int(chr(self.current_selection ) ) lowerCamelCase_ = index - self.position if index == self.position: return if index < len(self.choices ): if self.position > index: self.move_direction(Direction.UP , -movement ) elif self.position < index: self.move_direction(Direction.DOWN , lowercase ) else: return else: return def SCREAMING_SNAKE_CASE_( self , lowercase = 0 ) -> List[str]: if self.prompt: linebreak() forceWrite(self.prompt , "\n" ) if in_colab: forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" ) else: forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" ) lowerCamelCase_ = default_choice for i in range(len(self.choices ) ): self.print_choice(lowercase ) forceWrite("\n" ) move_cursor(len(self.choices ) - self.position , "UP" ) with cursor.hide(): while True: if in_colab: try: lowerCamelCase_ = int(builtins.input() ) except ValueError: lowerCamelCase_ = default_choice else: lowerCamelCase_ = self.handle_input() if choice is not None: reset_cursor() for _ in range(len(self.choices ) + 1 ): move_cursor(1 , "UP" ) clear_line() self.write_choice(lowercase , "\n" ) return choice
19
from typing import Callable, List, Optional, Union import PIL import torch from transformers import ( CLIPImageProcessor, CLIPSegForImageSegmentation, CLIPSegProcessor, CLIPTextModel, CLIPTokenizer, ) from diffusers import DiffusionPipeline from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import deprecate, is_accelerate_available, logging __A =logging.get_logger(__name__) # pylint: disable=invalid-name class _SCREAMING_SNAKE_CASE ( snake_case_ ): def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> List[Any]: super().__init__() if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1: lowerCamelCase_ = ( f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`' f' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure ' "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1" , "1.0.0" , lowercase , standard_warn=lowercase ) lowerCamelCase_ = dict(scheduler.config ) lowerCamelCase_ = 1 lowerCamelCase_ = FrozenDict(lowercase ) if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False: lowerCamelCase_ = ( f'The configuration file of this scheduler: {scheduler} has not set the configuration' " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make" " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to" " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face" " Hub, it would be very nice if you could open a Pull request for the" " `scheduler/scheduler_config.json` file" ) deprecate("skip_prk_steps not set" , "1.0.0" , lowercase , standard_warn=lowercase ) lowerCamelCase_ = dict(scheduler.config ) lowerCamelCase_ = True lowerCamelCase_ = FrozenDict(lowercase ) if safety_checker is None: logger.warning( f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure' " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) self.register_modules( segmentation_model=lowercase , segmentation_processor=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , unet=lowercase , scheduler=lowercase , safety_checker=lowercase , feature_extractor=lowercase , ) def SCREAMING_SNAKE_CASE_( self , lowercase = "auto" ) -> Tuple: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory lowerCamelCase_ = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(lowercase ) def SCREAMING_SNAKE_CASE_( self ) -> List[Any]: self.enable_attention_slicing(lowercase ) def SCREAMING_SNAKE_CASE_( self ) -> str: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) lowerCamelCase_ = torch.device("cuda" ) for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]: if cpu_offloaded_model is not None: cpu_offload(lowercase , lowercase ) @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]: if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(lowercase , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() def __call__( self , lowercase , lowercase , lowercase , lowercase = 512 , lowercase = 512 , lowercase = 50 , lowercase = 7.5 , lowercase = None , lowercase = 1 , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "pil" , lowercase = True , lowercase = None , lowercase = 1 , **lowercase , ) -> int: lowerCamelCase_ = self.segmentation_processor( text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device ) lowerCamelCase_ = self.segmentation_model(**lowercase ) lowerCamelCase_ = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy() lowerCamelCase_ = self.numpy_to_pil(lowercase )[0].resize(image.size ) # Run inpainting pipeline with the generated mask lowerCamelCase_ = StableDiffusionInpaintPipeline( vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , ) return inpainting_pipeline( prompt=lowercase , image=lowercase , mask_image=lowercase , height=lowercase , width=lowercase , num_inference_steps=lowercase , guidance_scale=lowercase , negative_prompt=lowercase , num_images_per_prompt=lowercase , eta=lowercase , generator=lowercase , latents=lowercase , output_type=lowercase , return_dict=lowercase , callback=lowercase , callback_steps=lowercase , )
19
1
from __future__ import annotations import math def snake_case__ ( SCREAMING_SNAKE_CASE_ : int ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def snake_case__ ( SCREAMING_SNAKE_CASE_ : int ): '''simple docstring''' lowercase__ : int = str(SCREAMING_SNAKE_CASE_ ) lowercase__ : Union[str, Any] = [n] for i in range(1 , len(SCREAMING_SNAKE_CASE_ ) ): list_nums.append(int(str_num[i:] ) ) list_nums.append(int(str_num[:-i] ) ) return list_nums def snake_case__ ( SCREAMING_SNAKE_CASE_ : int ): '''simple docstring''' if len(str(SCREAMING_SNAKE_CASE_ ) ) > 3: if not is_prime(int(str(SCREAMING_SNAKE_CASE_ )[-3:] ) ) or not is_prime(int(str(SCREAMING_SNAKE_CASE_ )[:3] ) ): return False return True def snake_case__ ( SCREAMING_SNAKE_CASE_ : int = 11 ): '''simple docstring''' lowercase__ : list[int] = [] lowercase__ : Tuple = 13 while len(SCREAMING_SNAKE_CASE_ ) != count: if validate(SCREAMING_SNAKE_CASE_ ): lowercase__ : Optional[int] = list_truncated_nums(SCREAMING_SNAKE_CASE_ ) if all(is_prime(SCREAMING_SNAKE_CASE_ ) for i in list_nums ): list_truncated_primes.append(SCREAMING_SNAKE_CASE_ ) num += 2 return list_truncated_primes def snake_case__ ( ): '''simple docstring''' return sum(compute_truncated_primes(11 ) ) if __name__ == "__main__": print(F'''{sum(compute_truncated_primes(11)) = }''')
216
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging snake_case_ = logging.get_logger(__name__) if is_vision_available(): import PIL class SCREAMING_SNAKE_CASE__ (__snake_case ): __lowerCamelCase : Optional[Any] = ["""pixel_values"""] def __init__( self , a = True , a = None , a = PILImageResampling.BICUBIC , a = True , a = None , a = True , a = 1 / 255 , a = True , a = None , a = None , a = True , **a , ): super().__init__(**a) lowercase__ : List[str] = size if size is not None else {'shortest_edge': 224} lowercase__ : str = get_size_dict(a , default_to_square=a) lowercase__ : Optional[int] = crop_size if crop_size is not None else {'height': 224, 'width': 224} lowercase__ : Union[str, Any] = get_size_dict(a , default_to_square=a , param_name='crop_size') lowercase__ : List[str] = do_resize lowercase__ : List[Any] = size lowercase__ : Tuple = resample lowercase__ : int = do_center_crop lowercase__ : Union[str, Any] = crop_size lowercase__ : int = do_rescale lowercase__ : List[str] = rescale_factor lowercase__ : Tuple = do_normalize lowercase__ : str = image_mean if image_mean is not None else OPENAI_CLIP_MEAN lowercase__ : Optional[Any] = image_std if image_std is not None else OPENAI_CLIP_STD lowercase__ : List[Any] = do_convert_rgb def snake_case_ ( self , a , a , a = PILImageResampling.BICUBIC , a = None , **a , ): lowercase__ : str = get_size_dict(a , default_to_square=a) if "shortest_edge" not in size: raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""") lowercase__ : str = get_resize_output_image_size(a , size=size['shortest_edge'] , default_to_square=a) return resize(a , size=a , resample=a , data_format=a , **a) def snake_case_ ( self , a , a , a = None , **a , ): lowercase__ : List[str] = get_size_dict(a) if "height" not in size or "width" not in size: raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""") return center_crop(a , size=(size['height'], size['width']) , data_format=a , **a) def snake_case_ ( self , a , a , a = None , **a , ): return rescale(a , scale=a , data_format=a , **a) def snake_case_ ( self , a , a , a , a = None , **a , ): return normalize(a , mean=a , std=a , data_format=a , **a) def snake_case_ ( self , a , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = ChannelDimension.FIRST , **a , ): lowercase__ : int = do_resize if do_resize is not None else self.do_resize lowercase__ : Tuple = size if size is not None else self.size lowercase__ : Union[str, Any] = get_size_dict(a , param_name='size' , default_to_square=a) lowercase__ : Optional[Any] = resample if resample is not None else self.resample lowercase__ : int = do_center_crop if do_center_crop is not None else self.do_center_crop lowercase__ : List[Any] = crop_size if crop_size is not None else self.crop_size lowercase__ : Union[str, Any] = get_size_dict(a , param_name='crop_size' , default_to_square=a) lowercase__ : Dict = do_rescale if do_rescale is not None else self.do_rescale lowercase__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase__ : List[str] = do_normalize if do_normalize is not None else self.do_normalize lowercase__ : Optional[Any] = image_mean if image_mean is not None else self.image_mean lowercase__ : List[str] = image_std if image_std is not None else self.image_std lowercase__ : Any = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb lowercase__ : str = make_list_of_images(a) if not valid_images(a): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.') if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.') if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.') if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.') if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.') # PIL RGBA images are converted to RGB if do_convert_rgb: lowercase__ : str = [convert_to_rgb(a) for image in images] # All transformations expect numpy arrays. lowercase__ : Dict = [to_numpy_array(a) for image in images] if do_resize: lowercase__ : Tuple = [self.resize(image=a , size=a , resample=a) for image in images] if do_center_crop: lowercase__ : List[str] = [self.center_crop(image=a , size=a) for image in images] if do_rescale: lowercase__ : str = [self.rescale(image=a , scale=a) for image in images] if do_normalize: lowercase__ : Tuple = [self.normalize(image=a , mean=a , std=a) for image in images] lowercase__ : Optional[int] = [to_channel_dimension_format(a , a) for image in images] lowercase__ : Dict = {'pixel_values': images} return BatchFeature(data=a , tensor_type=a)
216
1
import torch from diffusers import KDPMaDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class _A ( __magic_name__): SCREAMING_SNAKE_CASE : int = (KDPMaDiscreteScheduler,) SCREAMING_SNAKE_CASE : Optional[Any] = 10 def UpperCAmelCase ( self , **_SCREAMING_SNAKE_CASE ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = { 'num_train_timesteps': 1100, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', } config.update(**_SCREAMING_SNAKE_CASE ) return config def UpperCAmelCase ( self ): """simple docstring""" for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase ( self ): """simple docstring""" for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=_SCREAMING_SNAKE_CASE , beta_end=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase ( self ): """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase ( self ): """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = self.scheduler_classes[0] SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_scheduler_config(prediction_type='v_prediction' ) SCREAMING_SNAKE_CASE_ : List[str] = scheduler_class(**_SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(self.num_inference_steps ) SCREAMING_SNAKE_CASE_ : Optional[Any] = self.dummy_model() SCREAMING_SNAKE_CASE_ : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma SCREAMING_SNAKE_CASE_ : Union[str, Any] = sample.to(_SCREAMING_SNAKE_CASE ) for i, t in enumerate(scheduler.timesteps ): SCREAMING_SNAKE_CASE_ : int = scheduler.scale_model_input(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ : Dict = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ : List[str] = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ : List[Any] = output.prev_sample SCREAMING_SNAKE_CASE_ : str = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) ) SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 4.6_934e-07 ) < 1e-2 assert abs(result_mean.item() - 6.1_112e-10 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 4.693_428_650_170_972e-07 ) < 1e-2 assert abs(result_mean.item() - 0.0002 ) < 1e-3 def UpperCAmelCase ( self ): """simple docstring""" if torch_device == "mps": return SCREAMING_SNAKE_CASE_ : int = self.scheduler_classes[0] SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_scheduler_config() SCREAMING_SNAKE_CASE_ : Dict = scheduler_class(**_SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(self.num_inference_steps ) SCREAMING_SNAKE_CASE_ : int = self.dummy_model() SCREAMING_SNAKE_CASE_ : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma SCREAMING_SNAKE_CASE_ : Union[str, Any] = sample.to(_SCREAMING_SNAKE_CASE ) for i, t in enumerate(scheduler.timesteps ): SCREAMING_SNAKE_CASE_ : Optional[Any] = scheduler.scale_model_input(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ : Dict = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ : Any = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ : Optional[Any] = output.prev_sample SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) ) SCREAMING_SNAKE_CASE_ : Optional[int] = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 20.4125 ) < 1e-2 assert abs(result_mean.item() - 0.0266 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 20.4125 ) < 1e-2 assert abs(result_mean.item() - 0.0266 ) < 1e-3 def UpperCAmelCase ( self ): """simple docstring""" if torch_device == "mps": return SCREAMING_SNAKE_CASE_ : List[Any] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE_ : List[Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler_class(**_SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(self.num_inference_steps , device=_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ : Any = self.dummy_model() SCREAMING_SNAKE_CASE_ : List[str] = self.dummy_sample_deter.to(_SCREAMING_SNAKE_CASE ) * scheduler.init_noise_sigma for t in scheduler.timesteps: SCREAMING_SNAKE_CASE_ : Any = scheduler.scale_model_input(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ : Any = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ : Optional[int] = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ : Optional[Any] = output.prev_sample SCREAMING_SNAKE_CASE_ : Any = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) ) if str(_SCREAMING_SNAKE_CASE ).startswith('cpu' ): # The following sum varies between 148 and 156 on mps. Why? assert abs(result_sum.item() - 20.4125 ) < 1e-2 assert abs(result_mean.item() - 0.0266 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 20.4125 ) < 1e-2 assert abs(result_mean.item() - 0.0266 ) < 1e-3
253
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Value from .base import TaskTemplate @dataclass(frozen=__magic_name__) class _A ( __magic_name__): # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization SCREAMING_SNAKE_CASE : str = field(default='''text-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True}) SCREAMING_SNAKE_CASE : ClassVar[Features] = Features({'''text''': Value('''string''')}) SCREAMING_SNAKE_CASE : ClassVar[Features] = Features({'''labels''': ClassLabel}) SCREAMING_SNAKE_CASE : str = "text" SCREAMING_SNAKE_CASE : str = "labels" def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ): """simple docstring""" if self.label_column not in features: raise ValueError(f"Column {self.label_column} is not present in features." ) if not isinstance(features[self.label_column] , _SCREAMING_SNAKE_CASE ): raise ValueError(f"Column {self.label_column} is not a ClassLabel." ) SCREAMING_SNAKE_CASE_ : List[Any] = copy.deepcopy(self ) SCREAMING_SNAKE_CASE_ : Optional[int] = self.label_schema.copy() SCREAMING_SNAKE_CASE_ : List[Any] = features[self.label_column] SCREAMING_SNAKE_CASE_ : List[Any] = label_schema return task_template @property def UpperCAmelCase ( self ): """simple docstring""" return { self.text_column: "text", self.label_column: "labels", }
253
1
"""simple docstring""" import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler __SCREAMING_SNAKE_CASE : Tuple = 16 __SCREAMING_SNAKE_CASE : int = 32 def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 16 , _SCREAMING_SNAKE_CASE = "bert-base-cased" ) -> Optional[Any]: snake_case_ = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) snake_case_ = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(_SCREAMING_SNAKE_CASE ): # max_length=None => use the model max length (it's actually the default) snake_case_ = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset snake_case_ = datasets.map( _SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=_SCREAMING_SNAKE_CASE ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library snake_case_ = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(_SCREAMING_SNAKE_CASE ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(_SCREAMING_SNAKE_CASE , padding="""max_length""" , max_length=128 , return_tensors="""pt""" ) return tokenizer.pad(_SCREAMING_SNAKE_CASE , padding="""longest""" , return_tensors="""pt""" ) # Instantiate dataloaders. snake_case_ = DataLoader( tokenized_datasets["""train"""] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE ) snake_case_ = DataLoader( tokenized_datasets["""validation"""] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE ) return train_dataloader, eval_dataloader def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: # Initialize accelerator snake_case_ = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs snake_case_ = config["""lr"""] snake_case_ = int(config["""num_epochs"""] ) snake_case_ = int(config["""seed"""] ) snake_case_ = int(config["""batch_size"""] ) snake_case_ = args.model_name_or_path set_seed(_SCREAMING_SNAKE_CASE ) snake_case_ , snake_case_ = get_dataloaders(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) snake_case_ = AutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE ) # Instantiate optimizer snake_case_ = ( AdamW if accelerator.state.deepspeed_plugin is None or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) snake_case_ = optimizer_cls(params=model.parameters() , lr=_SCREAMING_SNAKE_CASE ) if accelerator.state.deepspeed_plugin is not None: snake_case_ = accelerator.state.deepspeed_plugin.deepspeed_config[ """gradient_accumulation_steps""" ] else: snake_case_ = 1 snake_case_ = (len(_SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): snake_case_ = get_linear_schedule_with_warmup( optimizer=_SCREAMING_SNAKE_CASE , num_warmup_steps=0 , num_training_steps=_SCREAMING_SNAKE_CASE , ) else: snake_case_ = DummyScheduler(_SCREAMING_SNAKE_CASE , total_num_steps=_SCREAMING_SNAKE_CASE , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = accelerator.prepare( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # We need to keep track of how many total steps we have iterated over snake_case_ = 0 # We also need to keep track of the stating epoch so files are named properly snake_case_ = 0 # Now we train the model snake_case_ = evaluate.load("""glue""" , """mrpc""" ) snake_case_ = 0 snake_case_ = {} for epoch in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): model.train() for step, batch in enumerate(_SCREAMING_SNAKE_CASE ): snake_case_ = model(**_SCREAMING_SNAKE_CASE ) snake_case_ = outputs.loss snake_case_ = loss / gradient_accumulation_steps accelerator.backward(_SCREAMING_SNAKE_CASE ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() snake_case_ = 0 for step, batch in enumerate(_SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): snake_case_ = model(**_SCREAMING_SNAKE_CASE ) snake_case_ = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times snake_case_ , snake_case_ = accelerator.gather( (predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(_SCREAMING_SNAKE_CASE ) - 1: snake_case_ = predictions[: len(eval_dataloader.dataset ) - samples_seen] snake_case_ = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE , ) snake_case_ = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , _SCREAMING_SNAKE_CASE ) snake_case_ = eval_metric["""accuracy"""] if best_performance < eval_metric["accuracy"]: snake_case_ = eval_metric["""accuracy"""] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), f"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}""" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , """all_results.json""" ) , """w""" ) as f: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def _a ( ) -> int: snake_case_ = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" ) parser.add_argument( """--model_name_or_path""" , type=_SCREAMING_SNAKE_CASE , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=_SCREAMING_SNAKE_CASE , ) parser.add_argument( """--output_dir""" , type=_SCREAMING_SNAKE_CASE , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , ) parser.add_argument( """--performance_lower_bound""" , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help="""Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.""" , ) parser.add_argument( """--num_epochs""" , type=_SCREAMING_SNAKE_CASE , default=3 , help="""Number of train epochs.""" , ) snake_case_ = parser.parse_args() snake_case_ = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16} training_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
363
"""simple docstring""" from collections import namedtuple import requests from lxml import html # type: ignore __SCREAMING_SNAKE_CASE : List[str] = namedtuple('covid_data', 'cases deaths recovered') def _a ( _SCREAMING_SNAKE_CASE = "https://www.worldometers.info/coronavirus/" ) -> covid_data: snake_case_ = """//div[@class = \"maincounter-number\"]/span/text()""" return covid_data(*html.fromstring(requests.get(_SCREAMING_SNAKE_CASE ).content ).xpath(_SCREAMING_SNAKE_CASE ) ) __SCREAMING_SNAKE_CASE : List[str] = 'Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}' print(fmt.format(*covid_stats()))
233
0
'''simple docstring''' import warnings from ...utils import logging from .image_processing_owlvit import OwlViTImageProcessor SCREAMING_SNAKE_CASE_: int =logging.get_logger(__name__) class __A ( UpperCamelCase__ ): def __init__(self : Any , *__a : Optional[Any] , **__a : List[str] ): warnings.warn( "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use OwlViTImageProcessor instead." , __a , ) super().__init__(*__a , **__a )
1
"""simple docstring""" from __future__ import annotations def lowerCAmelCase__ ( _UpperCamelCase : list[list[int]] ) -> int: """simple docstring""" for i in range(1 , len(matrix[0] ) ): matrix[0][i] += matrix[0][i - 1] # preprocessing the first column for i in range(1 , len(_UpperCamelCase ) ): matrix[i][0] += matrix[i - 1][0] # updating the path cost for current position for i in range(1 , len(_UpperCamelCase ) ): for j in range(1 , len(matrix[0] ) ): matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] ) return matrix[-1][-1] if __name__ == "__main__": import doctest doctest.testmod()
150
0
"""simple docstring""" import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class lowerCamelCase ( lowerCAmelCase__ ): '''simple docstring''' def __init__(self , *_lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , **_lowerCamelCase ): """simple docstring""" super().__init__(*_lowerCamelCase , **_lowerCamelCase ) UpperCAmelCase__ : List[str] = eval_examples UpperCAmelCase__ : List[Any] = post_process_function def _a (self , _lowerCamelCase = None , _lowerCamelCase=None , _lowerCamelCase = None , _lowerCamelCase = "eval" , **_lowerCamelCase , ): """simple docstring""" UpperCAmelCase__ : Tuple = gen_kwargs.copy() UpperCAmelCase__ : Optional[Any] = ( gen_kwargs["""max_length"""] if gen_kwargs.get("""max_length""" ) is not None else self.args.generation_max_length ) UpperCAmelCase__ : int = ( gen_kwargs["""num_beams"""] if gen_kwargs.get("""num_beams""" ) is not None else self.args.generation_num_beams ) UpperCAmelCase__ : int = gen_kwargs UpperCAmelCase__ : Optional[int] = self.eval_dataset if eval_dataset is None else eval_dataset UpperCAmelCase__ : Tuple = self.get_eval_dataloader(_lowerCamelCase ) UpperCAmelCase__ : Optional[Any] = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. UpperCAmelCase__ : List[str] = self.compute_metrics UpperCAmelCase__ : List[Any] = None UpperCAmelCase__ : Optional[int] = time.time() UpperCAmelCase__ : str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: UpperCAmelCase__ : int = eval_loop( _lowerCamelCase , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_lowerCamelCase , metric_key_prefix=_lowerCamelCase , ) finally: UpperCAmelCase__ : Any = compute_metrics UpperCAmelCase__ : Union[str, Any] = self.args.eval_batch_size * self.args.world_size if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( _lowerCamelCase , _lowerCamelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default UpperCAmelCase__ : List[str] = self.post_process_function(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) UpperCAmelCase__ : Optional[Any] = self.compute_metrics(_lowerCamelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"""{metric_key_prefix}_""" ): UpperCAmelCase__ : Tuple = metrics.pop(_lowerCamelCase ) metrics.update(output.metrics ) else: UpperCAmelCase__ : List[str] = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(_lowerCamelCase ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) UpperCAmelCase__ : List[str] = self.callback_handler.on_evaluate(self.args , self.state , self.control , _lowerCamelCase ) return metrics def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase = "test" , **_lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : Optional[int] = gen_kwargs.copy() UpperCAmelCase__ : List[Any] = self.get_test_dataloader(_lowerCamelCase ) # Temporarily disable metric computation, we will do it in the loop here. UpperCAmelCase__ : Any = self.compute_metrics UpperCAmelCase__ : List[Any] = None UpperCAmelCase__ : str = time.time() UpperCAmelCase__ : Any = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: UpperCAmelCase__ : List[str] = eval_loop( _lowerCamelCase , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_lowerCamelCase , metric_key_prefix=_lowerCamelCase , ) finally: UpperCAmelCase__ : int = compute_metrics UpperCAmelCase__ : Any = self.args.eval_batch_size * self.args.world_size if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( _lowerCamelCase , _lowerCamelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output UpperCAmelCase__ : Optional[Any] = self.post_process_function(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , """predict""" ) UpperCAmelCase__ : List[str] = self.compute_metrics(_lowerCamelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"""{metric_key_prefix}_""" ): UpperCAmelCase__ : str = metrics.pop(_lowerCamelCase ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_lowerCamelCase )
166
"""simple docstring""" from math import atan, cos, radians, sin, tan from .haversine_distance import haversine_distance _A = 6_378_137.0 _A = 6_356_752.314_245 _A = 6_37_81_37 def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> float: UpperCAmelCase__ : Union[str, Any] = (AXIS_A - AXIS_B) / AXIS_A # Parametric latitudes # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude UpperCAmelCase__ : List[str] = atan((1 - flattening) * tan(radians(lowerCAmelCase ) ) ) UpperCAmelCase__ : str = atan((1 - flattening) * tan(radians(lowerCAmelCase ) ) ) # Compute central angle between two points # using haversine theta. sigma = haversine_distance / equatorial radius UpperCAmelCase__ : Any = haversine_distance(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) / EQUATORIAL_RADIUS # Intermediate P and Q values UpperCAmelCase__ : int = (b_lata + b_lata) / 2 UpperCAmelCase__ : Any = (b_lata - b_lata) / 2 # Intermediate X value # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2) UpperCAmelCase__ : Optional[Any] = (sin(lowerCAmelCase ) ** 2) * (cos(lowerCAmelCase ) ** 2) UpperCAmelCase__ : Optional[Any] = cos(sigma / 2 ) ** 2 UpperCAmelCase__ : Optional[int] = (sigma - sin(lowerCAmelCase )) * (x_numerator / x_demonimator) # Intermediate Y value # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2) UpperCAmelCase__ : Optional[int] = (cos(lowerCAmelCase ) ** 2) * (sin(lowerCAmelCase ) ** 2) UpperCAmelCase__ : str = sin(sigma / 2 ) ** 2 UpperCAmelCase__ : Union[str, Any] = (sigma + sin(lowerCAmelCase )) * (y_numerator / y_denominator) return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value))) if __name__ == "__main__": import doctest doctest.testmod()
166
1
"""simple docstring""" import os import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers.models.realm.configuration_realm import RealmConfig from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer class _A ( lowerCAmelCase ): def A__ ( self ): """simple docstring""" lowercase = tempfile.mkdtemp() lowercase = 5 # Realm tok lowercase = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """test""", """question""", """this""", """is""", """the""", """first""", """second""", """third""", """fourth""", """fifth""", """record""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] lowercase = os.path.join(self.tmpdirname , """realm_tokenizer""" ) os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase ) lowercase = os.path.join(__lowerCAmelCase , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) lowercase = os.path.join(self.tmpdirname , """realm_block_records""" ) os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase ) def A__ ( self ): """simple docstring""" return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , """realm_tokenizer""" ) ) def A__ ( self ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def A__ ( self ): """simple docstring""" lowercase = RealmConfig(num_block_records=self.num_block_records ) return config def A__ ( self ): """simple docstring""" lowercase = Dataset.from_dict( { """id""": ["""0""", """1"""], """question""": ["""foo""", """bar"""], """answers""": [["""Foo""", """Bar"""], ["""Bar"""]], } ) return dataset def A__ ( self ): """simple docstring""" lowercase = np.array( [ B"""This is the first record""", B"""This is the second record""", B"""This is the third record""", B"""This is the fourth record""", B"""This is the fifth record""", B"""This is a longer longer longer record""", ] , dtype=__lowerCAmelCase , ) return block_records def A__ ( self ): """simple docstring""" lowercase = RealmRetriever( block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , ) return retriever def A__ ( self ): """simple docstring""" lowercase = self.get_config() lowercase = self.get_dummy_retriever() lowercase = retriever.tokenizer lowercase = np.array([0, 3] , dtype="""long""" ) lowercase = tokenizer(["""Test question"""] ).input_ids lowercase = tokenizer( ["""the fourth"""] , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , ).input_ids lowercase = config.reader_seq_len lowercase , lowercase , lowercase , lowercase = retriever( __lowerCAmelCase , __lowerCAmelCase , answer_ids=__lowerCAmelCase , max_length=__lowerCAmelCase , return_tensors="""np""" ) self.assertEqual(len(__lowerCAmelCase ) , 2 ) self.assertEqual(len(__lowerCAmelCase ) , 2 ) self.assertEqual(len(__lowerCAmelCase ) , 2 ) self.assertEqual(concat_inputs.input_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) ) self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """first""", """record""", """[SEP]"""] , ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """fourth""", """record""", """[SEP]"""] , ) def A__ ( self ): """simple docstring""" lowercase = self.get_config() lowercase = self.get_dummy_retriever() lowercase = retriever.tokenizer lowercase = np.array([0, 3, 5] , dtype="""long""" ) lowercase = tokenizer(["""Test question"""] ).input_ids lowercase = tokenizer( ["""the fourth""", """longer longer"""] , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , ).input_ids lowercase = config.reader_seq_len lowercase , lowercase , lowercase , lowercase = retriever( __lowerCAmelCase , __lowerCAmelCase , answer_ids=__lowerCAmelCase , max_length=__lowerCAmelCase , return_tensors="""np""" ) self.assertEqual([False, True, True] , __lowerCAmelCase ) self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __lowerCAmelCase ) self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __lowerCAmelCase ) def A__ ( self ): """simple docstring""" lowercase = self.get_dummy_retriever() retriever.save_pretrained(os.path.join(self.tmpdirname , """realm_block_records""" ) ) # Test local path lowercase = retriever.from_pretrained(os.path.join(self.tmpdirname , """realm_block_records""" ) ) self.assertEqual(retriever.block_records[0] , B"""This is the first record""" ) # Test mocked remote path with patch("""transformers.models.realm.retrieval_realm.hf_hub_download""" ) as mock_hf_hub_download: lowercase = os.path.join( os.path.join(self.tmpdirname , """realm_block_records""" ) , _REALM_BLOCK_RECORDS_FILENAME ) lowercase = RealmRetriever.from_pretrained("""google/realm-cc-news-pretrained-openqa""" ) self.assertEqual(retriever.block_records[0] , B"""This is the first record""" )
197
"""simple docstring""" def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[Any] ) -> int: '''simple docstring''' lowercase = [0 for i in range(r + 1 )] # nc0 = 1 lowercase = 1 for i in range(1 , n + 1 ): # to compute current row from previous row. lowercase = min(lowerCAmelCase__ , lowerCAmelCase__ ) while j > 0: c[j] += c[j - 1] j -= 1 return c[r] print(binomial_coefficient(n=1_0, r=5))
197
1
'''simple docstring''' import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class lowerCAmelCase__ ( __a ): def __init__( self : Dict ) ->Dict: '''simple docstring''' _UpperCAmelCase : List[Any] = [] def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : Dict , lowerCamelCase__ : List[str] , **lowerCamelCase__ : List[str] ) ->Optional[Any]: '''simple docstring''' self.events.append("on_init_end" ) def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Dict , **lowerCamelCase__ : List[Any] ) ->int: '''simple docstring''' self.events.append("on_train_begin" ) def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Tuple , **lowerCamelCase__ : str ) ->Optional[Any]: '''simple docstring''' self.events.append("on_train_end" ) def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Dict , **lowerCamelCase__ : List[Any] ) ->Dict: '''simple docstring''' self.events.append("on_epoch_begin" ) def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Tuple , **lowerCamelCase__ : Optional[Any] ) ->str: '''simple docstring''' self.events.append("on_epoch_end" ) def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : Dict , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] , **lowerCamelCase__ : int ) ->Optional[Any]: '''simple docstring''' self.events.append("on_step_begin" ) def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : str , lowerCamelCase__ : Tuple , lowerCamelCase__ : str , **lowerCamelCase__ : Dict ) ->List[Any]: '''simple docstring''' self.events.append("on_step_end" ) def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : str , **lowerCamelCase__ : int ) ->Optional[Any]: '''simple docstring''' self.events.append("on_evaluate" ) def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[Any] , **lowerCamelCase__ : Tuple ) ->Dict: '''simple docstring''' self.events.append("on_predict" ) def lowerCAmelCase__ ( self : str , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Any ) ->Optional[int]: '''simple docstring''' self.events.append("on_save" ) def lowerCAmelCase__ ( self : str , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : List[Any] , **lowerCamelCase__ : Optional[Any] ) ->List[Any]: '''simple docstring''' self.events.append("on_log" ) def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Any , **lowerCamelCase__ : Tuple ) ->Optional[int]: '''simple docstring''' self.events.append("on_prediction_step" ) @require_torch class lowerCAmelCase__ ( unittest.TestCase ): def lowerCAmelCase__ ( self : Tuple ) ->str: '''simple docstring''' _UpperCAmelCase : Dict = tempfile.mkdtemp() def lowerCAmelCase__ ( self : List[Any] ) ->List[str]: '''simple docstring''' shutil.rmtree(self.output_dir ) def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : Optional[int]=0 , lowerCamelCase__ : Any=0 , lowerCamelCase__ : Union[str, Any]=64 , lowerCamelCase__ : Union[str, Any]=64 , lowerCamelCase__ : Any=None , lowerCamelCase__ : Union[str, Any]=False , **lowerCamelCase__ : Dict ) ->Union[str, Any]: '''simple docstring''' _UpperCAmelCase : Union[str, Any] = RegressionDataset(length=UpperCamelCase__ ) _UpperCAmelCase : str = RegressionDataset(length=UpperCamelCase__ ) _UpperCAmelCase : Any = RegressionModelConfig(a=UpperCamelCase__ , b=UpperCamelCase__ ) _UpperCAmelCase : Optional[Any] = RegressionPreTrainedModel(UpperCamelCase__ ) _UpperCAmelCase : Optional[Any] = TrainingArguments(self.output_dir , disable_tqdm=UpperCamelCase__ , report_to=[] , **UpperCamelCase__ ) return Trainer( UpperCamelCase__ , UpperCamelCase__ , train_dataset=UpperCamelCase__ , eval_dataset=UpperCamelCase__ , callbacks=UpperCamelCase__ , ) def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Tuple ) ->Any: '''simple docstring''' self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) ) # Order doesn't matter _UpperCAmelCase : Any = sorted(UpperCamelCase__ , key=lambda lowerCamelCase__ : cb.__name__ if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else cb.__class__.__name__ ) _UpperCAmelCase : Dict = sorted(UpperCamelCase__ , key=lambda lowerCamelCase__ : cb.__name__ if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else cb.__class__.__name__ ) for cba, cba in zip(UpperCamelCase__ , UpperCamelCase__ ): if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and isinstance(UpperCamelCase__ , UpperCamelCase__ ): self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) elif isinstance(UpperCamelCase__ , UpperCamelCase__ ) and not isinstance(UpperCamelCase__ , UpperCamelCase__ ): self.assertEqual(UpperCamelCase__ , cba.__class__ ) elif not isinstance(UpperCamelCase__ , UpperCamelCase__ ) and isinstance(UpperCamelCase__ , UpperCamelCase__ ): self.assertEqual(cba.__class__ , UpperCamelCase__ ) else: self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : Any ) ->List[Any]: '''simple docstring''' _UpperCAmelCase : Tuple = ["on_init_end", "on_train_begin"] _UpperCAmelCase : Union[str, Any] = 0 _UpperCAmelCase : int = len(trainer.get_eval_dataloader() ) _UpperCAmelCase : str = ["on_prediction_step"] * len(trainer.get_eval_dataloader() ) + ["on_log", "on_evaluate"] for _ in range(trainer.state.num_train_epochs ): expected_events.append("on_epoch_begin" ) for _ in range(UpperCamelCase__ ): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append("on_log" ) if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append("on_save" ) expected_events.append("on_epoch_end" ) if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def lowerCAmelCase__ ( self : Optional[Any] ) ->List[str]: '''simple docstring''' _UpperCAmelCase : List[Any] = self.get_trainer() _UpperCAmelCase : Dict = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase__ ) # Callbacks passed at init are added to the default callbacks _UpperCAmelCase : Optional[int] = self.get_trainer(callbacks=[MyTestTrainerCallback] ) expected_callbacks.append(UpperCamelCase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase__ ) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback _UpperCAmelCase : List[Any] = self.get_trainer(disable_tqdm=UpperCamelCase__ ) _UpperCAmelCase : Tuple = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase__ ) def lowerCAmelCase__ ( self : List[str] ) ->int: '''simple docstring''' _UpperCAmelCase : Optional[int] = DEFAULT_CALLBACKS.copy() + [ProgressCallback] _UpperCAmelCase : List[str] = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(UpperCamelCase__ ) expected_callbacks.remove(UpperCamelCase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase__ ) _UpperCAmelCase : List[str] = self.get_trainer() _UpperCAmelCase : Optional[int] = trainer.pop_callback(UpperCamelCase__ ) self.assertEqual(cb.__class__ , UpperCamelCase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase__ ) trainer.add_callback(UpperCamelCase__ ) expected_callbacks.insert(0 , UpperCamelCase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase__ ) # We can also add, pop, or remove by instance _UpperCAmelCase : Optional[int] = self.get_trainer() _UpperCAmelCase : str = trainer.callback_handler.callbacks[0] trainer.remove_callback(UpperCamelCase__ ) expected_callbacks.remove(UpperCamelCase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase__ ) _UpperCAmelCase : Optional[int] = self.get_trainer() _UpperCAmelCase : Optional[Any] = trainer.callback_handler.callbacks[0] _UpperCAmelCase : Tuple = trainer.pop_callback(UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase__ ) trainer.add_callback(UpperCamelCase__ ) expected_callbacks.insert(0 , UpperCamelCase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase__ ) def lowerCAmelCase__ ( self : str ) ->Dict: '''simple docstring''' import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action="ignore" , category=UpperCamelCase__ ) _UpperCAmelCase : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] ) trainer.train() _UpperCAmelCase : List[str] = trainer.callback_handler.callbacks[-2].events self.assertEqual(UpperCamelCase__ , self.get_expected_events(UpperCamelCase__ ) ) # Independent log/save/eval _UpperCAmelCase : str = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 ) trainer.train() _UpperCAmelCase : Tuple = trainer.callback_handler.callbacks[-2].events self.assertEqual(UpperCamelCase__ , self.get_expected_events(UpperCamelCase__ ) ) _UpperCAmelCase : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 ) trainer.train() _UpperCAmelCase : int = trainer.callback_handler.callbacks[-2].events self.assertEqual(UpperCamelCase__ , self.get_expected_events(UpperCamelCase__ ) ) _UpperCAmelCase : Optional[int] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="steps" ) trainer.train() _UpperCAmelCase : Optional[int] = trainer.callback_handler.callbacks[-2].events self.assertEqual(UpperCamelCase__ , self.get_expected_events(UpperCamelCase__ ) ) _UpperCAmelCase : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="epoch" ) trainer.train() _UpperCAmelCase : Union[str, Any] = trainer.callback_handler.callbacks[-2].events self.assertEqual(UpperCamelCase__ , self.get_expected_events(UpperCamelCase__ ) ) # A bit of everything _UpperCAmelCase : List[str] = self.get_trainer( callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="steps" , ) trainer.train() _UpperCAmelCase : List[str] = trainer.callback_handler.callbacks[-2].events self.assertEqual(UpperCamelCase__ , self.get_expected_events(UpperCamelCase__ ) ) # warning should be emitted for duplicated callbacks with patch("transformers.trainer_callback.logger.warning" ) as warn_mock: _UpperCAmelCase : List[str] = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , ) assert str(UpperCamelCase__ ) in warn_mock.call_args[0][0]
369
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { 'facebook/s2t-wav2vec2-large-en-de': ( 'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json' ), # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2 } class lowerCAmelCase__ ( UpperCAmelCase__ ): lowerCAmelCase : int = "speech_to_text_2" lowerCAmelCase : str = ["past_key_values"] lowerCAmelCase : int = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"} def __init__( self : Optional[Any] , lowerCamelCase__ : Tuple=1_00_00 , lowerCamelCase__ : Any=6 , lowerCamelCase__ : Tuple=20_48 , lowerCamelCase__ : List[Any]=4 , lowerCamelCase__ : Tuple=0.0 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Tuple="relu" , lowerCamelCase__ : Dict=2_56 , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : List[Any]=0.0 , lowerCamelCase__ : Optional[int]=0.0 , lowerCamelCase__ : List[Any]=0.0_2 , lowerCamelCase__ : Tuple=2 , lowerCamelCase__ : Optional[int]=True , lowerCamelCase__ : Any=1 , lowerCamelCase__ : int=0 , lowerCamelCase__ : str=2 , lowerCamelCase__ : List[Any]=10_24 , **lowerCamelCase__ : str , ) ->Optional[int]: '''simple docstring''' _UpperCAmelCase : Any = vocab_size _UpperCAmelCase : Optional[int] = d_model _UpperCAmelCase : List[Any] = decoder_ffn_dim _UpperCAmelCase : Any = decoder_layers _UpperCAmelCase : int = decoder_attention_heads _UpperCAmelCase : Any = dropout _UpperCAmelCase : List[Any] = attention_dropout _UpperCAmelCase : Optional[int] = activation_dropout _UpperCAmelCase : List[Any] = activation_function _UpperCAmelCase : int = init_std _UpperCAmelCase : Dict = decoder_layerdrop _UpperCAmelCase : str = use_cache _UpperCAmelCase : Union[str, Any] = decoder_layers _UpperCAmelCase : Optional[Any] = scale_embedding # scale factor will be sqrt(d_model) if True _UpperCAmelCase : Any = max_target_positions super().__init__( pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , decoder_start_token_id=lowerCamelCase__ , **lowerCamelCase__ , )
322
0
'''simple docstring''' import logging import numpy as np import pytest from scipy.linalg import eigh logging.basicConfig(level=logging.INFO, format="""%(message)s""") def __UpperCamelCase ( UpperCAmelCase ): return input_array.reshape((input_array.size, 1) ) def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): lowercase__ : Dict = np.nan for i in range(UpperCAmelCase ): lowercase__ : Optional[Any] = features[:, labels == i] lowercase__ : Optional[Any] = data.mean(1 ) # Centralize the data of class i lowercase__ : Dict = data - column_reshape(UpperCAmelCase ) if i > 0: # If covariance_sum is not None covariance_sum += np.dot(UpperCAmelCase , centered_data.T ) else: # If covariance_sum is np.nan (i.e. first loop) lowercase__ : List[str] = np.dot(UpperCAmelCase , centered_data.T ) return covariance_sum / features.shape[1] def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): lowercase__ : Tuple = features.mean(1 ) lowercase__ : Dict = np.nan for i in range(UpperCAmelCase ): lowercase__ : List[str] = features[:, labels == i] lowercase__ : int = data.shape[1] lowercase__ : Optional[int] = data.mean(1 ) if i > 0: # If covariance_sum is not None covariance_sum += device_data * np.dot( column_reshape(UpperCAmelCase ) - column_reshape(UpperCAmelCase ) , (column_reshape(UpperCAmelCase ) - column_reshape(UpperCAmelCase )).T , ) else: # If covariance_sum is np.nan (i.e. first loop) lowercase__ : Optional[int] = device_data * np.dot( column_reshape(UpperCAmelCase ) - column_reshape(UpperCAmelCase ) , (column_reshape(UpperCAmelCase ) - column_reshape(UpperCAmelCase )).T , ) return covariance_sum / features.shape[1] def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ): # Check if the features have been loaded if features.any(): lowercase__ : Optional[Any] = features.mean(1 ) # Center the dataset lowercase__ : List[str] = features - np.reshape(UpperCAmelCase , (data_mean.size, 1) ) lowercase__ : Optional[Any] = np.dot(UpperCAmelCase , centered_data.T ) / features.shape[1] lowercase__ , lowercase__ : Tuple = np.linalg.eigh(UpperCAmelCase ) # Take all the columns in the reverse order (-1), and then takes only the first lowercase__ : str = eigenvectors[:, ::-1][:, 0:dimensions] # Project the database on the new space lowercase__ : Tuple = np.dot(filtered_eigenvectors.T , UpperCAmelCase ) logging.info('''Principal Component Analysis computed''' ) return projected_data else: logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=UpperCAmelCase ) logging.error('''Dataset empty''' ) raise AssertionError def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): assert classes > dimensions # Check if features have been already loaded if features.any: lowercase__ , lowercase__ : Any = eigh( covariance_between_classes(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , covariance_within_classes(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , ) lowercase__ : Optional[int] = eigenvectors[:, ::-1][:, :dimensions] lowercase__ , lowercase__ , lowercase__ : Optional[int] = np.linalg.svd(UpperCAmelCase ) lowercase__ : List[str] = svd_matrix[:, 0:dimensions] lowercase__ : str = np.dot(filtered_svd_matrix.T , UpperCAmelCase ) logging.info('''Linear Discriminant Analysis computed''' ) return projected_data else: logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=UpperCAmelCase ) logging.error('''Dataset empty''' ) raise AssertionError def __UpperCamelCase ( ): # Create dummy dataset with 2 classes and 3 features lowercase__ : List[str] = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] ) lowercase__ : Optional[Any] = np.array([0, 0, 0, 1, 1] ) lowercase__ : str = 2 lowercase__ : Dict = 2 # Assert that the function raises an AssertionError if dimensions > classes with pytest.raises(UpperCAmelCase ) as error_info: lowercase__ : int = linear_discriminant_analysis( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) if isinstance(UpperCAmelCase , np.ndarray ): raise AssertionError( '''Did not raise AssertionError for dimensions > classes''' ) assert error_info.type is AssertionError def __UpperCamelCase ( ): lowercase__ : Optional[int] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] ) lowercase__ : int = 2 lowercase__ : Any = np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] ) with pytest.raises(UpperCAmelCase ) as error_info: lowercase__ : Dict = principal_component_analysis(UpperCAmelCase , UpperCAmelCase ) if not np.allclose(UpperCAmelCase , UpperCAmelCase ): raise AssertionError assert error_info.type is AssertionError if __name__ == "__main__": import doctest doctest.testmod()
198
'''simple docstring''' def __UpperCamelCase ( UpperCAmelCase = 1 , UpperCAmelCase = 1000 ): lowercase__ : Dict = 1 lowercase__ : Dict = 0 for divide_by_number in range(UpperCAmelCase , digit + 1 ): lowercase__ : list[int] = [] lowercase__ : Union[str, Any] = numerator for _ in range(1 , digit + 1 ): if now_divide in has_been_divided: if longest_list_length < len(UpperCAmelCase ): lowercase__ : Dict = len(UpperCAmelCase ) lowercase__ : Optional[Any] = divide_by_number else: has_been_divided.append(UpperCAmelCase ) lowercase__ : int = now_divide * 10 % divide_by_number return the_digit # Tests if __name__ == "__main__": import doctest doctest.testmod()
198
1
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]: '''simple docstring''' global f # a global dp table for knapsack if f[i][j] < 0: if j < wt[i - 1]: SCREAMING_SNAKE_CASE__ = mf_knapsack(i - 1 , snake_case_ , snake_case_ , snake_case_ ) else: SCREAMING_SNAKE_CASE__ = max( mf_knapsack(i - 1 , snake_case_ , snake_case_ , snake_case_ ) , mf_knapsack(i - 1 , snake_case_ , snake_case_ , j - wt[i - 1] ) + val[i - 1] , ) SCREAMING_SNAKE_CASE__ = val return f[i][j] def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE__ = [[0] * (w + 1) for _ in range(n + 1 )] for i in range(1 , n + 1 ): for w_ in range(1 , w + 1 ): if wt[i - 1] <= w_: SCREAMING_SNAKE_CASE__ = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] ) else: SCREAMING_SNAKE_CASE__ = dp[i - 1][w_] return dp[n][w_], dp def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]: '''simple docstring''' if not (isinstance(snake_case_ , (list, tuple) ) and isinstance(snake_case_ , (list, tuple) )): raise ValueError( 'Both the weights and values vectors must be either lists or tuples' ) SCREAMING_SNAKE_CASE__ = len(snake_case_ ) if num_items != len(snake_case_ ): SCREAMING_SNAKE_CASE__ = ( """The number of weights must be the same as the number of values.\n""" F'But got {num_items} weights and {len(snake_case_ )} values' ) raise ValueError(snake_case_ ) for i in range(snake_case_ ): if not isinstance(wt[i] , snake_case_ ): SCREAMING_SNAKE_CASE__ = ( """All weights must be integers but got weight of """ F'type {type(wt[i] )} at index {i}' ) raise TypeError(snake_case_ ) SCREAMING_SNAKE_CASE__ = knapsack(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) SCREAMING_SNAKE_CASE__ = set() _construct_solution(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) return optimal_val, example_optional_set def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Any: '''simple docstring''' if i > 0 and j > 0: if dp[i - 1][j] == dp[i][j]: _construct_solution(snake_case_ , snake_case_ , i - 1 , snake_case_ , snake_case_ ) else: optimal_set.add(snake_case_ ) _construct_solution(snake_case_ , snake_case_ , i - 1 , j - wt[i - 1] , snake_case_ ) if __name__ == "__main__": __snake_case = [3, 2, 4, 4] __snake_case = [4, 3, 2, 3] __snake_case = 4 __snake_case = 6 __snake_case = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)] __snake_case = knapsack(w, wt, val, n) print(optimal_solution) print(mf_knapsack(n, wt, val, w)) # switched the n and w # testing the dynamic programming problem with example # the optimal subset for the above example are items 3 and 4 __snake_case = knapsack_with_example_solution(w, wt, val) assert optimal_solution == 8 assert optimal_subset == {3, 4} print("""optimal_value = """, optimal_solution) print("""An optimal subset corresponding to the optimal value""", optimal_subset)
355
import shutil import tempfile import unittest from transformers import ( SPIECE_UNDERLINE, AddedToken, BatchEncoding, NllbTokenizer, NllbTokenizerFast, is_torch_available, ) from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin __snake_case = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right __snake_case = 25_60_47 __snake_case = 25_61_45 @require_sentencepiece @require_tokenizers class lowercase__ ( _UpperCAmelCase , unittest.TestCase ): A__ : int =NllbTokenizer A__ : Optional[int] =NllbTokenizerFast A__ : Union[str, Any] =True A__ : Dict =True A__ : Tuple ={} def A_ ( self : List[str] ): super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE__ = NllbTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def A_ ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE__ = NllbTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = tokenizer.tokenize('This is a test' ) self.assertListEqual(UpperCAmelCase_ , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) SCREAMING_SNAKE_CASE__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( UpperCAmelCase_ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) SCREAMING_SNAKE_CASE__ = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) self.assertListEqual( UpperCAmelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ ) self.assertListEqual( UpperCAmelCase_ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) def A_ ( self : Optional[int] ): SCREAMING_SNAKE_CASE__ = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-nllb', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): SCREAMING_SNAKE_CASE__ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = self.tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp() SCREAMING_SNAKE_CASE__ = tokenizer_r.save_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = tokenizer_p.save_pretrained(UpperCAmelCase_ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) SCREAMING_SNAKE_CASE__ = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f ) self.assertSequenceEqual(UpperCAmelCase_ , UpperCAmelCase_ ) # Checks everything loads correctly in the same way SCREAMING_SNAKE_CASE__ = tokenizer_r.from_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = tokenizer_p.from_pretrained(UpperCAmelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) ) shutil.rmtree(UpperCAmelCase_ ) # Save tokenizer rust, legacy_format=True SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp() SCREAMING_SNAKE_CASE__ = tokenizer_r.save_pretrained(UpperCAmelCase_ , legacy_format=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = tokenizer_p.save_pretrained(UpperCAmelCase_ ) # Checks it save with the same files self.assertSequenceEqual(UpperCAmelCase_ , UpperCAmelCase_ ) # Checks everything loads correctly in the same way SCREAMING_SNAKE_CASE__ = tokenizer_r.from_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = tokenizer_p.from_pretrained(UpperCAmelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) ) shutil.rmtree(UpperCAmelCase_ ) # Save tokenizer rust, legacy_format=False SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp() SCREAMING_SNAKE_CASE__ = tokenizer_r.save_pretrained(UpperCAmelCase_ , legacy_format=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = tokenizer_p.save_pretrained(UpperCAmelCase_ ) # Checks it saved the tokenizer.json file self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way SCREAMING_SNAKE_CASE__ = tokenizer_r.from_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = tokenizer_p.from_pretrained(UpperCAmelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) ) shutil.rmtree(UpperCAmelCase_ ) @require_torch def A_ ( self : Tuple ): if not self.test_seqaseq: return SCREAMING_SNAKE_CASE__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Longer text that will definitely require truncation. SCREAMING_SNAKE_CASE__ = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for' ' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons' ' will only worsen the violence and misery for millions of people.', ] SCREAMING_SNAKE_CASE__ = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al' ' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi' ' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] try: SCREAMING_SNAKE_CASE__ = tokenizer.prepare_seqaseq_batch( src_texts=UpperCAmelCase_ , tgt_texts=UpperCAmelCase_ , max_length=3 , max_target_length=10 , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='ron_Latn' , ) except NotImplementedError: return self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 10 ) # max_target_length will default to max_length if not specified SCREAMING_SNAKE_CASE__ = tokenizer.prepare_seqaseq_batch( UpperCAmelCase_ , tgt_texts=UpperCAmelCase_ , max_length=3 , return_tensors='pt' ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 3 ) SCREAMING_SNAKE_CASE__ = tokenizer.prepare_seqaseq_batch( src_texts=UpperCAmelCase_ , max_length=3 , max_target_length=10 , return_tensors='pt' ) self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 ) self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 ) self.assertNotIn('decoder_input_ids' , UpperCAmelCase_ ) @unittest.skip('Unfortunately way too slow to build a BPE with SentencePiece.' ) def A_ ( self : List[Any] ): pass def A_ ( self : Optional[Any] ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): SCREAMING_SNAKE_CASE__ = [AddedToken('<special>' , lstrip=UpperCAmelCase_ )] SCREAMING_SNAKE_CASE__ = self.rust_tokenizer_class.from_pretrained( UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , **UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = tokenizer_r.encode('Hey this is a <special> token' ) SCREAMING_SNAKE_CASE__ = tokenizer_r.encode('<special>' , add_special_tokens=UpperCAmelCase_ )[0] self.assertTrue(special_token_id in r_output ) if self.test_slow_tokenizer: SCREAMING_SNAKE_CASE__ = self.rust_tokenizer_class.from_pretrained( UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , **UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE__ = self.tokenizer_class.from_pretrained( UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , **UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = tokenizer_p.encode('Hey this is a <special> token' ) SCREAMING_SNAKE_CASE__ = tokenizer_cr.encode('Hey this is a <special> token' ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertTrue(special_token_id in p_output ) self.assertTrue(special_token_id in cr_output ) @require_torch @require_sentencepiece @require_tokenizers class lowercase__ ( unittest.TestCase ): A__ : List[Any] ="""facebook/nllb-200-distilled-600M""" A__ : Tuple =[ """ UN Chief Says There Is No Military Solution in Syria""", """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""", ] A__ : Optional[Any] =[ """Şeful ONU declară că nu există o soluţie militară în Siria""", """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei""" """ pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor""" """ face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""", ] A__ : Optional[int] =[ 2_5_6_0_4_7, 1_6_2_9_7, 1_3_4_4_0_8, 8_1_6_5, 2_4_8_0_6_6, 1_4_7_3_4, 9_5_0, 1_1_3_5, 1_0_5_7_2_1, 3_5_7_3, 8_3, 2_7_3_5_2, 1_0_8, 4_9_4_8_6, 2, ] @classmethod def A_ ( cls : Tuple ): SCREAMING_SNAKE_CASE__ = NllbTokenizer.from_pretrained( cls.checkpoint_name , src_lang='eng_Latn' , tgt_lang='ron_Latn' ) SCREAMING_SNAKE_CASE__ = 1 return cls def A_ ( self : int ): self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Arab'] , 256001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Latn'] , 256002 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['fra_Latn'] , 256057 ) def A_ ( self : Optional[Any] ): SCREAMING_SNAKE_CASE__ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , UpperCAmelCase_ ) def A_ ( self : Dict ): self.assertIn(UpperCAmelCase_ , self.tokenizer.all_special_ids ) # fmt: off SCREAMING_SNAKE_CASE__ = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047] # fmt: on SCREAMING_SNAKE_CASE__ = self.tokenizer.decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase_ ) def A_ ( self : str ): SCREAMING_SNAKE_CASE__ = ['this is gunna be a long sentence ' * 20] assert isinstance(src_text[0] , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = 10 SCREAMING_SNAKE_CASE__ = self.tokenizer(UpperCAmelCase_ , max_length=UpperCAmelCase_ , truncation=UpperCAmelCase_ ).input_ids[0] self.assertEqual(ids[-1] , 2 ) self.assertEqual(ids[0] , UpperCAmelCase_ ) self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ ) def A_ ( self : Optional[Any] ): self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [256203, 3] ) def A_ ( self : Dict ): SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp() SCREAMING_SNAKE_CASE__ = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = NllbTokenizer.from_pretrained(UpperCAmelCase_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCAmelCase_ ) @require_torch def A_ ( self : Optional[int] ): SCREAMING_SNAKE_CASE__ = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , ) SCREAMING_SNAKE_CASE__ = shift_tokens_right( batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['ron_Latn'] ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual((2, 15) , batch.input_ids.shape ) self.assertEqual((2, 15) , batch.attention_mask.shape ) SCREAMING_SNAKE_CASE__ = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , batch.decoder_input_ids[0, 0] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def A_ ( self : str ): SCREAMING_SNAKE_CASE__ = self.tokenizer(self.src_text , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=3 , return_tensors='pt' ) SCREAMING_SNAKE_CASE__ = self.tokenizer( text_target=self.tgt_text , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=10 , return_tensors='pt' ) SCREAMING_SNAKE_CASE__ = targets['input_ids'] SCREAMING_SNAKE_CASE__ = shift_tokens_right( UpperCAmelCase_ , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def A_ ( self : List[str] ): SCREAMING_SNAKE_CASE__ = self.tokenizer._build_translation_inputs( 'A test' , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='fra_Latn' ) self.assertEqual( nested_simplify(UpperCAmelCase_ ) , { # A, test, EOS, en_XX 'input_ids': [[256047, 70, 7356, 2]], 'attention_mask': [[1, 1, 1, 1]], # ar_AR 'forced_bos_token_id': 256057, } , ) @require_torch def A_ ( self : Optional[int] ): SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = self.tokenizer( 'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' ) self.assertEqual( inputs.input_ids , [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047] ) SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = self.tokenizer( 'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' ) self.assertEqual( inputs.input_ids , [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2] )
169
0
'''simple docstring''' import argparse import collections import numpy as np import torch from flax import traverse_util from tax import checkpoints from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def lowercase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Dict ): """simple docstring""" return params[f'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :] def lowercase_ ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any="attention" ): """simple docstring""" __UpperCAmelCase : int = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :] ) __UpperCAmelCase : Tuple = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] ) __UpperCAmelCase : Tuple = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :] ) __UpperCAmelCase : List[str] = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] ) __UpperCAmelCase : List[str] = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :] ) __UpperCAmelCase : List[str] = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] ) __UpperCAmelCase : Optional[Any] = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :] ) __UpperCAmelCase : Dict = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] ) return k, o, q, v def lowercase_ ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[Any]=False ): """simple docstring""" if split_mlp_wi: __UpperCAmelCase : List[str] = params[f'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :] __UpperCAmelCase : Union[str, Any] = params[f'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :] __UpperCAmelCase : Dict = (wi_a, wi_a) else: __UpperCAmelCase : Union[str, Any] = params[f'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :] __UpperCAmelCase : Tuple = params[f'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :] return wi, wo def lowercase_ ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any] ): """simple docstring""" return params[f'{prefix}/{prefix}/{layer_name}/scale'][:, i] def lowercase_ ( lowerCAmelCase__ : dict , *, lowerCAmelCase__ : int , lowerCAmelCase__ : bool , lowerCAmelCase__ : bool = False ): """simple docstring""" __UpperCAmelCase : Tuple = traverse_util.flatten_dict(variables["""target"""] ) __UpperCAmelCase : Union[str, Any] = {"""/""".join(lowerCAmelCase__ ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi __UpperCAmelCase : Any = """encoder/encoder/mlp/wi_0/kernel""" in old print("""Split MLP:""" , lowerCAmelCase__ ) __UpperCAmelCase : Any = collections.OrderedDict() # Shared embeddings. __UpperCAmelCase : int = old["""token_embedder/embedding"""] # Encoder. for i in range(lowerCAmelCase__ ): # Block i, layer 0 (Self Attention). __UpperCAmelCase : Union[str, Any] = tax_layer_norm_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """encoder""" , """pre_attention_layer_norm""" ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = tax_attention_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """encoder""" , """attention""" ) __UpperCAmelCase : Any = layer_norm __UpperCAmelCase : List[Any] = k.T __UpperCAmelCase : Optional[int] = o.T __UpperCAmelCase : str = q.T __UpperCAmelCase : Any = v.T # Block i, layer 1 (MLP). __UpperCAmelCase : List[str] = tax_layer_norm_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """encoder""" , """pre_mlp_layer_norm""" ) __UpperCAmelCase , __UpperCAmelCase : int = tax_mlp_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """encoder""" , lowerCAmelCase__ ) __UpperCAmelCase : Optional[int] = layer_norm if split_mlp_wi: __UpperCAmelCase : List[Any] = wi[0].T __UpperCAmelCase : Any = wi[1].T else: __UpperCAmelCase : Tuple = wi.T __UpperCAmelCase : Tuple = wo.T if scalable_attention: # convert the rel_embedding of each layer __UpperCAmelCase : Dict = tax_relpos_bias_lookup( lowerCAmelCase__ , lowerCAmelCase__ , """encoder""" ).T __UpperCAmelCase : Optional[int] = old["""encoder/encoder_norm/scale"""] if not scalable_attention: __UpperCAmelCase : Any = tax_relpos_bias_lookup( lowerCAmelCase__ , 0 , """encoder""" ).T __UpperCAmelCase : Dict = tax_relpos_bias_lookup( lowerCAmelCase__ , 0 , """decoder""" ).T if not is_encoder_only: # Decoder. for i in range(lowerCAmelCase__ ): # Block i, layer 0 (Self Attention). __UpperCAmelCase : str = tax_layer_norm_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" , """pre_self_attention_layer_norm""" ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = tax_attention_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" , """self_attention""" ) __UpperCAmelCase : int = layer_norm __UpperCAmelCase : Optional[Any] = k.T __UpperCAmelCase : Dict = o.T __UpperCAmelCase : int = q.T __UpperCAmelCase : List[str] = v.T # Block i, layer 1 (Cross Attention). __UpperCAmelCase : Any = tax_layer_norm_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" , """pre_cross_attention_layer_norm""" ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : str = tax_attention_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" , """encoder_decoder_attention""" ) __UpperCAmelCase : Union[str, Any] = layer_norm __UpperCAmelCase : List[Any] = k.T __UpperCAmelCase : int = o.T __UpperCAmelCase : Optional[int] = q.T __UpperCAmelCase : Optional[int] = v.T # Block i, layer 2 (MLP). __UpperCAmelCase : Tuple = tax_layer_norm_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" , """pre_mlp_layer_norm""" ) __UpperCAmelCase , __UpperCAmelCase : Any = tax_mlp_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" , lowerCAmelCase__ ) __UpperCAmelCase : Optional[int] = layer_norm if split_mlp_wi: __UpperCAmelCase : Optional[Any] = wi[0].T __UpperCAmelCase : Optional[int] = wi[1].T else: __UpperCAmelCase : str = wi.T __UpperCAmelCase : int = wo.T if scalable_attention: # convert the rel_embedding of each layer __UpperCAmelCase : Union[str, Any] = tax_relpos_bias_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" ).T __UpperCAmelCase : Dict = old["""decoder/decoder_norm/scale"""] # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: __UpperCAmelCase : List[str] = old["""decoder/logits_dense/kernel"""].T return new def lowercase_ ( lowerCAmelCase__ : str , lowerCAmelCase__ : bool ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: __UpperCAmelCase : str = state_dict["""shared.weight"""] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: __UpperCAmelCase : List[str] = state_dict["""shared.weight"""] if "lm_head.weight" not in state_dict: # For old 1.0 models. print("""Using shared word embeddings as lm_head.""" ) __UpperCAmelCase : Union[str, Any] = state_dict["""shared.weight"""] return state_dict def lowercase_ ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Any ): """simple docstring""" __UpperCAmelCase : Tuple = checkpoints.load_tax_checkpoint(lowerCAmelCase__ ) __UpperCAmelCase : Any = convert_tax_to_pytorch( lowerCAmelCase__ , num_layers=config.num_layers , is_encoder_only=lowerCAmelCase__ , scalable_attention=lowerCAmelCase__ ) __UpperCAmelCase : str = make_state_dict(lowerCAmelCase__ , lowerCAmelCase__ ) model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ ) def lowercase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = False , ): """simple docstring""" __UpperCAmelCase : Optional[int] = MTaConfig.from_json_file(lowerCAmelCase__ ) print(f'Building PyTorch model from configuration: {config}' ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: __UpperCAmelCase : List[Any] = UMTaEncoderModel(lowerCAmelCase__ ) else: __UpperCAmelCase : Dict = UMTaForConditionalGeneration(lowerCAmelCase__ ) # Load weights from tf checkpoint load_tax_weights_in_ta(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # Save pytorch-model print(f'Save PyTorch model to {pytorch_dump_path}' ) model.save_pretrained(lowerCAmelCase__ ) # Verify that we can load the checkpoint. model.from_pretrained(lowerCAmelCase__ ) print("""Done""" ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''') # Required parameters parser.add_argument( '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False ) parser.add_argument( '''--scalable_attention''', action='''store_true''', help='''Whether the model uses scaled attention (umt5 model)''', default=False, ) _UpperCamelCase = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only, args.scalable_attention, )
254
'''simple docstring''' from sklearn.metrics import fa_score import datasets _UpperCamelCase = ''' The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation: F1 = 2 * (precision * recall) / (precision + recall) ''' _UpperCamelCase = ''' Args: predictions (`list` of `int`): Predicted labels. references (`list` of `int`): Ground truth labels. labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None. pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1. average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`. - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary. - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives. - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall. - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). sample_weight (`list` of `float`): Sample weights Defaults to None. Returns: f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better. Examples: Example 1-A simple binary example >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0]) >>> print(results) {\'f1\': 0.5} Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`. >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0) >>> print(round(results[\'f1\'], 2)) 0.67 Example 3-The same simple binary example as in Example 1, but with `sample_weight` included. >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3]) >>> print(round(results[\'f1\'], 2)) 0.35 Example 4-A multiclass example, with different values for the `average` input. >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro") >>> print(round(results[\'f1\'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro") >>> print(round(results[\'f1\'], 2)) 0.33 >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted") >>> print(round(results[\'f1\'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {\'f1\': array([0.8, 0. , 0. ])} ''' _UpperCamelCase = ''' @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _A ( datasets.Metric ): def __A ( self ) -> List[str]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] , ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=1 , __UpperCAmelCase="binary" , __UpperCAmelCase=None ) -> Any: '''simple docstring''' __UpperCAmelCase : List[Any] = fa_score( __UpperCAmelCase , __UpperCAmelCase , labels=__UpperCAmelCase , pos_label=__UpperCAmelCase , average=__UpperCAmelCase , sample_weight=__UpperCAmelCase ) return {"f1": float(__UpperCAmelCase ) if score.size == 1 else score}
254
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCamelCase : List[str] = { '''configuration_time_series_transformer''': [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimeSeriesTransformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Dict = [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TimeSeriesTransformerForPrediction''', '''TimeSeriesTransformerModel''', '''TimeSeriesTransformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys _lowerCamelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
206
from dataclasses import dataclass from typing import Tuple import numpy as np import torch @dataclass class lowercase : lowercase__ : torch.Tensor # [batch_size x 3] lowercase__ : torch.Tensor # [batch_size x 3] lowercase__ : torch.Tensor # [batch_size x 3] lowercase__ : torch.Tensor # [batch_size x 3] lowercase__ : int lowercase__ : int lowercase__ : float lowercase__ : float lowercase__ : Tuple[int] def __snake_case( self : str ) -> Dict: '''simple docstring''' assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0] assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3 assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2 def __snake_case( self : int ) -> str: '''simple docstring''' return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) ) def __snake_case( self : Tuple ) -> List[str]: '''simple docstring''' return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) ) def __snake_case( self : Any ) -> torch.Tensor: '''simple docstring''' SCREAMING_SNAKE_CASE = torch.arange(self.height * self.width ) SCREAMING_SNAKE_CASE = torch.stack( [ pixel_indices % self.width, torch.div(_UpperCamelCase , self.width , rounding_mode="trunc" ), ] , axis=1 , ) return coords @property def __snake_case( self : Any ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE = self.shape SCREAMING_SNAKE_CASE = int(np.prod(_UpperCamelCase ) ) SCREAMING_SNAKE_CASE = self.get_image_coords() SCREAMING_SNAKE_CASE = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] ) SCREAMING_SNAKE_CASE = self.get_camera_rays(_UpperCamelCase ) SCREAMING_SNAKE_CASE = rays.view(_UpperCamelCase , inner_batch_size * self.height * self.width , 2 , 3 ) return rays def __snake_case( self : Optional[int] , _UpperCamelCase : torch.Tensor ) -> torch.Tensor: '''simple docstring''' SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = coords.shape assert n_coords == 2 assert batch_size == self.origin.shape[0] SCREAMING_SNAKE_CASE = coords.view(_UpperCamelCase , -1 , 2 ) SCREAMING_SNAKE_CASE = self.resolution() SCREAMING_SNAKE_CASE = self.fov() SCREAMING_SNAKE_CASE = (flat.float() / (res - 1)) * 2 - 1 SCREAMING_SNAKE_CASE = fracs * torch.tan(fov / 2 ) SCREAMING_SNAKE_CASE = fracs.view(_UpperCamelCase , -1 , 2 ) SCREAMING_SNAKE_CASE = ( self.z.view(_UpperCamelCase , 1 , 3 ) + self.x.view(_UpperCamelCase , 1 , 3 ) * fracs[:, :, :1] + self.y.view(_UpperCamelCase , 1 , 3 ) * fracs[:, :, 1:] ) SCREAMING_SNAKE_CASE = directions / directions.norm(dim=-1 , keepdim=_UpperCamelCase ) SCREAMING_SNAKE_CASE = torch.stack( [ torch.broadcast_to(self.origin.view(_UpperCamelCase , 1 , 3 ) , [batch_size, directions.shape[1], 3] ), directions, ] , dim=2 , ) return rays.view(_UpperCamelCase , *_UpperCamelCase , 2 , 3 ) def __snake_case( self : List[Any] , _UpperCamelCase : int , _UpperCamelCase : int ) -> "DifferentiableProjectiveCamera": '''simple docstring''' assert width * self.height == height * self.width, "The aspect ratio should not change." return DifferentiableProjectiveCamera( origin=self.origin , x=self.x , y=self.y , z=self.z , width=_UpperCamelCase , height=_UpperCamelCase , x_fov=self.x_fov , y_fov=self.y_fov , ) def __lowerCamelCase (UpperCAmelCase__ : int ): SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] for theta in np.linspace(0 , 2 * np.pi , num=2_0 ): SCREAMING_SNAKE_CASE = np.array([np.sin(UpperCAmelCase__ ), np.cos(UpperCAmelCase__ ), -0.5] ) z /= np.sqrt(np.sum(z**2 ) ) SCREAMING_SNAKE_CASE = -z * 4 SCREAMING_SNAKE_CASE = np.array([np.cos(UpperCAmelCase__ ), -np.sin(UpperCAmelCase__ ), 0.0] ) SCREAMING_SNAKE_CASE = np.cross(UpperCAmelCase__ , UpperCAmelCase__ ) origins.append(UpperCAmelCase__ ) xs.append(UpperCAmelCase__ ) ys.append(UpperCAmelCase__ ) zs.append(UpperCAmelCase__ ) return DifferentiableProjectiveCamera( origin=torch.from_numpy(np.stack(UpperCAmelCase__ , axis=0 ) ).float() , x=torch.from_numpy(np.stack(UpperCAmelCase__ , axis=0 ) ).float() , y=torch.from_numpy(np.stack(UpperCAmelCase__ , axis=0 ) ).float() , z=torch.from_numpy(np.stack(UpperCAmelCase__ , axis=0 ) ).float() , width=UpperCAmelCase__ , height=UpperCAmelCase__ , x_fov=0.7 , y_fov=0.7 , shape=(1, len(UpperCAmelCase__ )) , )
206
1
from sympy import diff, lambdify, symbols from sympy.functions import * # noqa: F403 def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ = "x" , lowercase__ = 10**-10 , lowercase__ = 1 , ): __SCREAMING_SNAKE_CASE : Optional[Any] = symbols(lowercase__ ) __SCREAMING_SNAKE_CASE : Any = lambdify(lowercase__ , lowercase__ ) __SCREAMING_SNAKE_CASE : List[Any] = lambdify(lowercase__ , diff(lowercase__ , lowercase__ ) ) __SCREAMING_SNAKE_CASE : Optional[int] = starting_point while True: if diff_function(lowercase__ ) != 0: __SCREAMING_SNAKE_CASE : int = prev_guess - multiplicity * func(lowercase__ ) / diff_function( lowercase__ ) else: raise ZeroDivisionError('''Could not find root''' ) from None # Precision is checked by comparing the difference of consecutive guesses if abs(next_guess - prev_guess ) < precision: return next_guess __SCREAMING_SNAKE_CASE : Union[str, Any] = next_guess # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""") # Find root of polynomial # Find fourth Root of 5 print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}""") # Find value of e print( 'The root of log(y) - 1 = 0 is ', f"""{newton_raphson("log(y) - 1", 2, variable="y")}""", ) # Exponential Roots print( 'The root of exp(x) - 1 = 0 is', f"""{newton_raphson("exp(x) - 1", 1_0, precision=0.0_0_5)}""", ) # Find root of cos(x) print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
9
import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed __lowerCAmelCase : List[str] ='true' def _UpperCamelCase ( lowercase__ , lowercase__=82 , lowercase__=16 ): set_seed(42 ) __SCREAMING_SNAKE_CASE : Optional[int] = RegressionModel() __SCREAMING_SNAKE_CASE : Optional[int] = deepcopy(lowercase__ ) __SCREAMING_SNAKE_CASE : Any = RegressionDataset(length=lowercase__ ) __SCREAMING_SNAKE_CASE : Union[str, Any] = DataLoader(lowercase__ , batch_size=lowercase__ ) model.to(accelerator.device ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = accelerator.prepare(lowercase__ , lowercase__ ) return model, ddp_model, dataloader def _UpperCamelCase ( lowercase__ , lowercase__=False ): __SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' ) __SCREAMING_SNAKE_CASE : str = load_dataset('''glue''' , '''mrpc''' , split='''validation''' ) def tokenize_function(lowercase__ ): __SCREAMING_SNAKE_CASE : Dict = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowercase__ , max_length=lowercase__ ) return outputs with accelerator.main_process_first(): __SCREAMING_SNAKE_CASE : Tuple = dataset.map( lowercase__ , batched=lowercase__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) __SCREAMING_SNAKE_CASE : List[Any] = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(lowercase__ ): if use_longest: return tokenizer.pad(lowercase__ , padding='''longest''' , return_tensors='''pt''' ) return tokenizer.pad(lowercase__ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' ) return DataLoader(lowercase__ , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=16 ) def _UpperCamelCase ( lowercase__ , lowercase__ ): __SCREAMING_SNAKE_CASE : str = Accelerator(dispatch_batches=lowercase__ , split_batches=lowercase__ ) __SCREAMING_SNAKE_CASE : Optional[int] = get_dataloader(lowercase__ , not dispatch_batches ) __SCREAMING_SNAKE_CASE : List[str] = AutoModelForSequenceClassification.from_pretrained( '''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=lowercase__ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = accelerator.prepare(lowercase__ , lowercase__ ) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ): __SCREAMING_SNAKE_CASE : List[str] = [] for batch in dataloader: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = batch.values() with torch.no_grad(): __SCREAMING_SNAKE_CASE : Dict = model(lowercase__ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = accelerator.gather_for_metrics((logit, target) ) logits_and_targets.append((logit, target) ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = [], [] for logit, targ in logits_and_targets: logits.append(lowercase__ ) targs.append(lowercase__ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = torch.cat(lowercase__ ), torch.cat(lowercase__ ) return logits, targs def _UpperCamelCase ( lowercase__ , lowercase__=82 , lowercase__=False , lowercase__=False , lowercase__=16 ): __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = get_basic_setup(lowercase__ , lowercase__ , lowercase__ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = generate_predictions(lowercase__ , lowercase__ , lowercase__ ) assert ( len(lowercase__ ) == num_samples ), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(lowercase__ )}''' def _UpperCamelCase ( lowercase__ = False , lowercase__ = False ): __SCREAMING_SNAKE_CASE : Optional[Any] = evaluate.load('''glue''' , '''mrpc''' ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = get_mrpc_setup(lowercase__ , lowercase__ ) # First do baseline __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = setup['''no'''] model.to(lowercase__ ) model.eval() for batch in dataloader: batch.to(lowercase__ ) with torch.inference_mode(): __SCREAMING_SNAKE_CASE : Dict = model(**lowercase__ ) __SCREAMING_SNAKE_CASE : Dict = outputs.logits.argmax(dim=-1 ) metric.add_batch(predictions=lowercase__ , references=batch['''labels'''] ) __SCREAMING_SNAKE_CASE : int = metric.compute() # Then do distributed __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = setup['''ddp'''] model.eval() for batch in dataloader: with torch.inference_mode(): __SCREAMING_SNAKE_CASE : int = model(**lowercase__ ) __SCREAMING_SNAKE_CASE : str = outputs.logits.argmax(dim=-1 ) __SCREAMING_SNAKE_CASE : Any = batch['''labels'''] __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = accelerator.gather_for_metrics((preds, references) ) metric.add_batch(predictions=lowercase__ , references=lowercase__ ) __SCREAMING_SNAKE_CASE : List[Any] = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] , distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n''' def _UpperCamelCase ( ): __SCREAMING_SNAKE_CASE : Dict = Accelerator(split_batches=lowercase__ , dispatch_batches=lowercase__ ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print('''**Testing gather_for_metrics**''' ) for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' ) test_mrpc(lowercase__ , lowercase__ ) accelerator.state._reset_state() if accelerator.is_local_main_process: print('''**Test torch metrics**''' ) for split_batches in [True, False]: for dispatch_batches in [True, False]: __SCREAMING_SNAKE_CASE : List[Any] = Accelerator(split_batches=lowercase__ , dispatch_batches=lowercase__ ) if accelerator.is_local_main_process: print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' ) test_torch_metrics(lowercase__ , 99 ) accelerator.state._reset_state() if accelerator.is_local_main_process: print('''**Test last batch is not dropped when perfectly divisible**''' ) __SCREAMING_SNAKE_CASE : Tuple = Accelerator() test_torch_metrics(lowercase__ , 512 ) accelerator.state._reset_state() def _UpperCamelCase ( lowercase__ ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
9
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE : Tuple = { """facebook/nllb-moe-54B""": """https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json""", } class __a ( snake_case__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 'nllb-moe' SCREAMING_SNAKE_CASE_ = ['past_key_values'] SCREAMING_SNAKE_CASE_ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self : Any , lowercase_ : Union[str, Any]=12_8112 , lowercase_ : int=1024 , lowercase_ : List[Any]=12 , lowercase_ : Optional[Any]=4096 , lowercase_ : List[str]=16 , lowercase_ : Optional[int]=12 , lowercase_ : List[Any]=4096 , lowercase_ : Optional[int]=16 , lowercase_ : Optional[Any]=0.0_5 , lowercase_ : Dict=0.0_5 , lowercase_ : str=True , lowercase_ : Tuple=True , lowercase_ : Tuple="relu" , lowercase_ : List[Any]=1024 , lowercase_ : Tuple=0.1 , lowercase_ : int=0.1 , lowercase_ : Optional[int]=0.0 , lowercase_ : Optional[int]=0.0_2 , lowercase_ : Union[str, Any]=2 , lowercase_ : Any=True , lowercase_ : Tuple=False , lowercase_ : Tuple="float32" , lowercase_ : Union[str, Any]=False , lowercase_ : Dict=128 , lowercase_ : str=64 , lowercase_ : Union[str, Any]=4 , lowercase_ : Optional[Any]=4 , lowercase_ : List[str]=0.0_0_1 , lowercase_ : Union[str, Any]=0.0_0_1 , lowercase_ : Optional[int]="all" , lowercase_ : int=False , lowercase_ : List[str]=False , lowercase_ : List[Any]=1.0 , lowercase_ : Any=0.2 , lowercase_ : Any=1 , lowercase_ : Any=0 , lowercase_ : List[str]=2 , lowercase_ : Dict=False , **lowercase_ : str , ): UpperCamelCase__ : List[str] =vocab_size UpperCamelCase__ : int =max_position_embeddings UpperCamelCase__ : int =d_model UpperCamelCase__ : Union[str, Any] =encoder_ffn_dim UpperCamelCase__ : Optional[int] =encoder_layers UpperCamelCase__ : int =encoder_attention_heads UpperCamelCase__ : Dict =decoder_ffn_dim UpperCamelCase__ : str =decoder_layers UpperCamelCase__ : Optional[Any] =decoder_attention_heads UpperCamelCase__ : Union[str, Any] =dropout UpperCamelCase__ : Union[str, Any] =attention_dropout UpperCamelCase__ : str =activation_dropout UpperCamelCase__ : int =activation_function UpperCamelCase__ : Any =init_std UpperCamelCase__ : List[str] =encoder_layerdrop UpperCamelCase__ : Optional[Any] =decoder_layerdrop UpperCamelCase__ : List[Any] =use_cache UpperCamelCase__ : Optional[Any] =encoder_layers UpperCamelCase__ : Union[str, Any] =scale_embedding # scale factor will be sqrt(d_model) if True UpperCamelCase__ : Any =router_z_loss_coef UpperCamelCase__ : Any =router_aux_loss_coef UpperCamelCase__ : List[str] =decoder_sparse_step UpperCamelCase__ : List[str] =encoder_sparse_step UpperCamelCase__ : int =num_experts UpperCamelCase__ : Union[str, Any] =expert_capacity UpperCamelCase__ : Optional[Any] =router_bias if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' ) UpperCamelCase__ : str =router_dtype UpperCamelCase__ : str =router_ignore_padding_tokens UpperCamelCase__ : Dict =batch_prioritized_routing UpperCamelCase__ : List[Any] =second_expert_policy UpperCamelCase__ : Any =normalize_router_prob_before_dropping UpperCamelCase__ : Any =moe_eval_capacity_token_fraction UpperCamelCase__ : Optional[Any] =moe_token_dropout UpperCamelCase__ : Optional[int] =output_router_logits super().__init__( pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , **lowercase_ , )
157
"""simple docstring""" import os import shutil from pathlib import Path from typing import Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging if is_onnx_available(): import onnxruntime as ort _SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE : List[Any] = { """tensor(bool)""": np.bool_, """tensor(int8)""": np.inta, """tensor(uint8)""": np.uinta, """tensor(int16)""": np.intaa, """tensor(uint16)""": np.uintaa, """tensor(int32)""": np.intaa, """tensor(uint32)""": np.uintaa, """tensor(int64)""": np.intaa, """tensor(uint64)""": np.uintaa, """tensor(float16)""": np.floataa, """tensor(float)""": np.floataa, """tensor(double)""": np.floataa, } class __a : """simple docstring""" def __init__( self : Optional[Any] , lowercase_ : Tuple=None , **lowercase_ : int ): logger.info('''`diffusers.OnnxRuntimeModel` is experimental and might change in the future.''' ) UpperCamelCase__ : Optional[Any] =model UpperCamelCase__ : str =kwargs.get('''model_save_dir''' , lowercase_ ) UpperCamelCase__ : int =kwargs.get('''latest_model_name''' , lowercase_ ) def __call__( self : Any , **lowercase_ : Any ): UpperCamelCase__ : str ={k: np.array(lowercase_ ) for k, v in kwargs.items()} return self.model.run(lowercase_ , lowercase_ ) @staticmethod def _lowerCAmelCase ( lowercase_ : Union[str, Path] , lowercase_ : Dict=None , lowercase_ : Optional[Any]=None ): if provider is None: logger.info('''No onnxruntime provider specified, using CPUExecutionProvider''' ) UpperCamelCase__ : List[str] ='''CPUExecutionProvider''' return ort.InferenceSession(lowercase_ , providers=[provider] , sess_options=lowercase_ ) def _lowerCAmelCase ( self : Union[str, Any] , lowercase_ : Union[str, Path] , lowercase_ : Optional[str] = None , **lowercase_ : Union[str, Any] ): UpperCamelCase__ : Union[str, Any] =file_name if file_name is not None else ONNX_WEIGHTS_NAME UpperCamelCase__ : Tuple =self.model_save_dir.joinpath(self.latest_model_name ) UpperCamelCase__ : str =Path(lowercase_ ).joinpath(lowercase_ ) try: shutil.copyfile(lowercase_ , lowercase_ ) except shutil.SameFileError: pass # copy external weights (for models >2GB) UpperCamelCase__ : List[str] =self.model_save_dir.joinpath(lowercase_ ) if src_path.exists(): UpperCamelCase__ : List[str] =Path(lowercase_ ).joinpath(lowercase_ ) try: shutil.copyfile(lowercase_ , lowercase_ ) except shutil.SameFileError: pass def _lowerCAmelCase ( self : Tuple , lowercase_ : Union[str, os.PathLike] , **lowercase_ : int , ): if os.path.isfile(lowercase_ ): logger.error(f'''Provided path ({save_directory}) should be a directory, not a file''' ) return os.makedirs(lowercase_ , exist_ok=lowercase_ ) # saving model weights/files self._save_pretrained(lowercase_ , **lowercase_ ) @classmethod def _lowerCAmelCase ( cls : List[str] , lowercase_ : Union[str, Path] , lowercase_ : Optional[Union[bool, str, None]] = None , lowercase_ : Optional[Union[str, None]] = None , lowercase_ : bool = False , lowercase_ : Optional[str] = None , lowercase_ : Optional[str] = None , lowercase_ : Optional[str] = None , lowercase_ : Optional["ort.SessionOptions"] = None , **lowercase_ : List[Any] , ): UpperCamelCase__ : Union[str, Any] =file_name if file_name is not None else ONNX_WEIGHTS_NAME # load model from local directory if os.path.isdir(lowercase_ ): UpperCamelCase__ : Any =OnnxRuntimeModel.load_model( os.path.join(lowercase_ , lowercase_ ) , provider=lowercase_ , sess_options=lowercase_ ) UpperCamelCase__ : List[str] =Path(lowercase_ ) # load model from hub else: # download model UpperCamelCase__ : Tuple =hf_hub_download( repo_id=lowercase_ , filename=lowercase_ , use_auth_token=lowercase_ , revision=lowercase_ , cache_dir=lowercase_ , force_download=lowercase_ , ) UpperCamelCase__ : Any =Path(lowercase_ ).parent UpperCamelCase__ : List[Any] =Path(lowercase_ ).name UpperCamelCase__ : Optional[int] =OnnxRuntimeModel.load_model(lowercase_ , provider=lowercase_ , sess_options=lowercase_ ) return cls(model=lowercase_ , **lowercase_ ) @classmethod def _lowerCAmelCase ( cls : Dict , lowercase_ : Union[str, Path] , lowercase_ : bool = True , lowercase_ : Optional[str] = None , lowercase_ : Optional[str] = None , **lowercase_ : List[Any] , ): UpperCamelCase__ : Dict =None if len(str(lowercase_ ).split('''@''' ) ) == 2: UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] =model_id.split('''@''' ) return cls._from_pretrained( model_id=lowercase_ , revision=lowercase_ , cache_dir=lowercase_ , force_download=lowercase_ , use_auth_token=lowercase_ , **lowercase_ , )
157
1
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class __lowerCamelCase ( unittest.TestCase): """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase=7 , UpperCAmelCase=3 , UpperCAmelCase=18 , UpperCAmelCase=30 , UpperCAmelCase=400 , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=[0.5, 0.5, 0.5] , UpperCAmelCase=[0.5, 0.5, 0.5] , ): """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size if size is not None else {'height': 18, 'width': 20} _UpperCAmelCase = do_thumbnail _UpperCAmelCase = do_align_axis _UpperCAmelCase = do_pad _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean _UpperCAmelCase = image_std def UpperCamelCase ( self ): """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class __lowerCamelCase ( snake_case__ , unittest.TestCase): """simple docstring""" UpperCamelCase__ = DonutImageProcessor if is_vision_available() else None def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = DonutImageProcessingTester(self ) @property def UpperCamelCase ( self ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase , 'do_resize' ) ) self.assertTrue(hasattr(UpperCAmelCase , 'size' ) ) self.assertTrue(hasattr(UpperCAmelCase , 'do_thumbnail' ) ) self.assertTrue(hasattr(UpperCAmelCase , 'do_align_long_axis' ) ) self.assertTrue(hasattr(UpperCAmelCase , 'do_pad' ) ) self.assertTrue(hasattr(UpperCAmelCase , 'do_normalize' ) ) self.assertTrue(hasattr(UpperCAmelCase , 'image_mean' ) ) self.assertTrue(hasattr(UpperCAmelCase , 'image_std' ) ) def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 18, 'width': 20} ) _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) # Previous config had dimensions in (width, height) order _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {'height': 84, 'width': 42} ) def UpperCamelCase ( self ): """simple docstring""" pass @is_flaky() def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(UpperCAmelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , numpify=UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(UpperCAmelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , torchify=UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(UpperCAmelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , )
39
import warnings from pathlib import Path from typing import List, Tuple, Union import fire from torch import nn from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel from transformers.utils import logging _a = logging.get_logger(__name__) def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> None: """simple docstring""" _UpperCAmelCase = nn.ModuleList([src_layers[i] for i in layers_to_copy] ) assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ), F"""{len(__lowerCAmelCase )} != {len(__lowerCAmelCase )}""" dest_layers.load_state_dict(layers_to_copy.state_dict() ) _a = { # maps num layers in teacher -> num_layers in student -> which teacher layers to copy. # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP 12: { 1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher 2: [0, 6], 3: [0, 6, 11], 4: [0, 4, 8, 11], 6: [0, 2, 4, 7, 9, 11], 9: [0, 1, 2, 4, 5, 7, 9, 10, 11], 12: list(range(12)), }, 16: { # maps num layers in student -> which teacher layers to copy 1: [0], 2: [0, 15], 3: [0, 8, 15], 4: [0, 5, 10, 15], 6: [0, 3, 6, 9, 12, 15], 8: [0, 2, 4, 6, 8, 10, 12, 15], 9: [0, 1, 3, 5, 7, 9, 11, 13, 15], 12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15], 16: list(range(16)), }, 6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))}, } _a = { # maps num layers in student -> which teacher layers to copy. 6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]}, 12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]}, 16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]}, } def __A ( __lowerCAmelCase , __lowerCAmelCase )-> Dict: """simple docstring""" try: _UpperCAmelCase = LAYERS_TO_COPY[n_teacher][n_student] return val except KeyError: if n_student != n_teacher: warnings.warn( F"""no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first""" F""" {n_student}""" ) return list(range(__lowerCAmelCase ) ) def __A ( __lowerCAmelCase , __lowerCAmelCase )-> List[int]: """simple docstring""" if n_student > n_teacher: raise ValueError(F"""Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}""" ) elif n_teacher == n_student: return list(range(__lowerCAmelCase ) ) elif n_student == 1: return [n_teacher - 1] else: return LAYERS_TO_SUPERVISE[n_teacher][n_student] def __A ( __lowerCAmelCase , __lowerCAmelCase = "student" , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase , )-> Tuple[PreTrainedModel, List[int], List[int]]: """simple docstring""" _UpperCAmelCase = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.' assert (e is not None) or (d is not None), _msg if isinstance(__lowerCAmelCase , __lowerCAmelCase ): AutoTokenizer.from_pretrained(__lowerCAmelCase ).save_pretrained(__lowerCAmelCase ) # purely for convenience _UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(__lowerCAmelCase ).eval() else: assert isinstance(__lowerCAmelCase , __lowerCAmelCase ), F"""teacher must be a model or string got type {type(__lowerCAmelCase )}""" _UpperCAmelCase = teacher.config.to_diff_dict() try: _UpperCAmelCase , _UpperCAmelCase = teacher.config.encoder_layers, teacher.config.decoder_layers if e is None: _UpperCAmelCase = teacher_e if d is None: _UpperCAmelCase = teacher_d init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} ) except AttributeError: # T5 if hasattr(teacher.config , 'num_encoder_layers' ): _UpperCAmelCase , _UpperCAmelCase = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers else: _UpperCAmelCase , _UpperCAmelCase = teacher.config.num_layers, teacher.config.num_decoder_layers if e is None: _UpperCAmelCase = teacher_e if d is None: _UpperCAmelCase = teacher_d if hasattr(teacher.config , 'num_encoder_layers' ): init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} ) else: init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} ) # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs init_kwargs.update(__lowerCAmelCase ) # Copy weights _UpperCAmelCase = teacher.config_class(**__lowerCAmelCase ) _UpperCAmelCase = AutoModelForSeqaSeqLM.from_config(__lowerCAmelCase ) # Start by copying the full teacher state dict this will copy the first N teacher layers to the student. _UpperCAmelCase = student.load_state_dict(teacher.state_dict() , strict=__lowerCAmelCase ) assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys. if copy_first_teacher_layers: # Our copying is done. We just log and save _UpperCAmelCase , _UpperCAmelCase = list(range(__lowerCAmelCase ) ), list(range(__lowerCAmelCase ) ) logger.info( F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to""" F""" {save_path}""" ) student.save_pretrained(__lowerCAmelCase ) return student, e_layers_to_copy, d_layers_to_copy # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer. if e_layers_to_copy is None: _UpperCAmelCase = pick_layers_to_copy(__lowerCAmelCase , __lowerCAmelCase ) if d_layers_to_copy is None: _UpperCAmelCase = pick_layers_to_copy(__lowerCAmelCase , __lowerCAmelCase ) try: if hasattr( __lowerCAmelCase , 'prophetnet' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , __lowerCAmelCase ) copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , __lowerCAmelCase ) else: copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , __lowerCAmelCase ) copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , __lowerCAmelCase ) except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block copy_layers(teacher.encoder.block , student.encoder.block , __lowerCAmelCase ) copy_layers(teacher.decoder.block , student.decoder.block , __lowerCAmelCase ) logger.info( F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}""" ) _UpperCAmelCase = { 'teacher_type': teacher.config.model_type, 'copied_encoder_layers': e_layers_to_copy, 'copied_decoder_layers': d_layers_to_copy, } student.save_pretrained(__lowerCAmelCase ) # Save information about copying for easier reproducibility return student, e_layers_to_copy, d_layers_to_copy if __name__ == "__main__": fire.Fire(create_student_by_copying_alternating_layers)
39
1
import os import shutil from pathlib import Path from typing import Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging if is_onnx_available(): import onnxruntime as ort lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { '''tensor(bool)''': np.bool_, '''tensor(int8)''': np.inta, '''tensor(uint8)''': np.uinta, '''tensor(int16)''': np.intaa, '''tensor(uint16)''': np.uintaa, '''tensor(int32)''': np.intaa, '''tensor(uint32)''': np.uintaa, '''tensor(int64)''': np.intaa, '''tensor(uint64)''': np.uintaa, '''tensor(float16)''': np.floataa, '''tensor(float)''': np.floataa, '''tensor(double)''': np.floataa, } class _UpperCAmelCase : '''simple docstring''' def __init__( self : Union[str, Any] , lowercase_ : List[Any]=None , **lowercase_ : Optional[int]) -> Optional[Any]: """simple docstring""" logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.") _UpperCamelCase = model _UpperCamelCase = kwargs.get("model_save_dir" , _lowerCAmelCase) _UpperCamelCase = kwargs.get("latest_model_name" , _lowerCAmelCase) def __call__( self : List[Any] , **lowercase_ : Optional[int]) -> Tuple: """simple docstring""" _UpperCamelCase = {k: np.array(_lowerCAmelCase) for k, v in kwargs.items()} return self.model.run(_lowerCAmelCase , _lowerCAmelCase) @staticmethod def __UpperCAmelCase ( lowercase_ : Union[str, Path] , lowercase_ : List[Any]=None , lowercase_ : Any=None) -> str: """simple docstring""" if provider is None: logger.info("No onnxruntime provider specified, using CPUExecutionProvider") _UpperCamelCase = """CPUExecutionProvider""" return ort.InferenceSession(_lowerCAmelCase , providers=[provider] , sess_options=_lowerCAmelCase) def __UpperCAmelCase ( self : Dict , lowercase_ : Union[str, Path] , lowercase_ : Optional[str] = None , **lowercase_ : Union[str, Any]) -> str: """simple docstring""" _UpperCamelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME _UpperCamelCase = self.model_save_dir.joinpath(self.latest_model_name) _UpperCamelCase = Path(_lowerCAmelCase).joinpath(_lowerCAmelCase) try: shutil.copyfile(_lowerCAmelCase , _lowerCAmelCase) except shutil.SameFileError: pass # copy external weights (for models >2GB) _UpperCamelCase = self.model_save_dir.joinpath(_lowerCAmelCase) if src_path.exists(): _UpperCamelCase = Path(_lowerCAmelCase).joinpath(_lowerCAmelCase) try: shutil.copyfile(_lowerCAmelCase , _lowerCAmelCase) except shutil.SameFileError: pass def __UpperCAmelCase ( self : Optional[int] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Union[str, Any] , ) -> int: """simple docstring""" if os.path.isfile(_lowerCAmelCase): logger.error(f'Provided path ({save_directory}) should be a directory, not a file') return os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase) # saving model weights/files self._save_pretrained(_lowerCAmelCase , **_lowerCAmelCase) @classmethod def __UpperCAmelCase ( cls : Optional[Any] , lowercase_ : Union[str, Path] , lowercase_ : Optional[Union[bool, str, None]] = None , lowercase_ : Optional[Union[str, None]] = None , lowercase_ : bool = False , lowercase_ : Optional[str] = None , lowercase_ : Optional[str] = None , lowercase_ : Optional[str] = None , lowercase_ : Optional["ort.SessionOptions"] = None , **lowercase_ : int , ) -> Optional[Any]: """simple docstring""" _UpperCamelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME # load model from local directory if os.path.isdir(_lowerCAmelCase): _UpperCamelCase = OnnxRuntimeModel.load_model( os.path.join(_lowerCAmelCase , _lowerCAmelCase) , provider=_lowerCAmelCase , sess_options=_lowerCAmelCase) _UpperCamelCase = Path(_lowerCAmelCase) # load model from hub else: # download model _UpperCamelCase = hf_hub_download( repo_id=_lowerCAmelCase , filename=_lowerCAmelCase , use_auth_token=_lowerCAmelCase , revision=_lowerCAmelCase , cache_dir=_lowerCAmelCase , force_download=_lowerCAmelCase , ) _UpperCamelCase = Path(_lowerCAmelCase).parent _UpperCamelCase = Path(_lowerCAmelCase).name _UpperCamelCase = OnnxRuntimeModel.load_model(_lowerCAmelCase , provider=_lowerCAmelCase , sess_options=_lowerCAmelCase) return cls(model=_lowerCAmelCase , **_lowerCAmelCase) @classmethod def __UpperCAmelCase ( cls : Optional[int] , lowercase_ : Union[str, Path] , lowercase_ : bool = True , lowercase_ : Optional[str] = None , lowercase_ : Optional[str] = None , **lowercase_ : int , ) -> str: """simple docstring""" _UpperCamelCase = None if len(str(_lowerCAmelCase).split("@")) == 2: _UpperCamelCase = model_id.split("@") return cls._from_pretrained( model_id=_lowerCAmelCase , revision=_lowerCAmelCase , cache_dir=_lowerCAmelCase , force_download=_lowerCAmelCase , use_auth_token=_lowerCAmelCase , **_lowerCAmelCase , )
370
from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def lowerCAmelCase__ ( a__ , a__ , a__ , a__ ) ->Optional[int]: '''simple docstring''' for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), f'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), f'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})' def lowerCAmelCase__ ( a__ , a__ , a__ , a__ , a__=True ) ->List[str]: '''simple docstring''' model.train() _UpperCamelCase = model(a__ ) _UpperCamelCase = F.mse_loss(a__ , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(a__ ) def lowerCAmelCase__ ( a__ , a__=False ) ->Union[str, Any]: '''simple docstring''' set_seed(42 ) _UpperCamelCase = RegressionModel() _UpperCamelCase = deepcopy(a__ ) _UpperCamelCase = RegressionDataset(length=80 ) _UpperCamelCase = DataLoader(a__ , batch_size=16 ) model.to(accelerator.device ) if sched: _UpperCamelCase = AdamW(params=model.parameters() , lr=1e-3 ) _UpperCamelCase = AdamW(params=ddp_model.parameters() , lr=1e-3 ) _UpperCamelCase = LambdaLR(a__ , lr_lambda=lambda a__ : epoch**0.65 ) _UpperCamelCase = LambdaLR(a__ , lr_lambda=lambda a__ : epoch**0.65 ) # Make a copy of `model` if sched: _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = accelerator.prepare(a__ , a__ , a__ , a__ ) else: _UpperCamelCase , _UpperCamelCase = accelerator.prepare(a__ , a__ ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def lowerCAmelCase__ ( a__ ) ->List[Any]: '''simple docstring''' _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = get_training_setup(a__ ) # Use a single batch _UpperCamelCase , _UpperCamelCase = next(iter(a__ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model _UpperCamelCase , _UpperCamelCase = accelerator.gather((ddp_input, ddp_target) ) _UpperCamelCase , _UpperCamelCase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(a__ , a__ , a__ , a__ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(a__ ): step_model(a__ , a__ , a__ , a__ ) else: # Sync grads step_model(a__ , a__ , a__ , a__ ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(a__ , a__ , a__ , a__ ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), f'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) _UpperCamelCase = ddp_input[torch.randperm(len(a__ ) )] def lowerCAmelCase__ ( a__ ) ->str: '''simple docstring''' _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = get_training_setup(a__ ) # Use a single batch _UpperCamelCase , _UpperCamelCase = next(iter(a__ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model _UpperCamelCase , _UpperCamelCase = accelerator.gather((ddp_input, ddp_target) ) _UpperCamelCase , _UpperCamelCase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(a__ , a__ , a__ , a__ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(a__ ): step_model(a__ , a__ , a__ , a__ ) else: # Sync grads step_model(a__ , a__ , a__ , a__ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) _UpperCamelCase = ddp_input[torch.randperm(len(a__ ) )] def lowerCAmelCase__ ( a__=False , a__=False ) ->List[Any]: '''simple docstring''' _UpperCamelCase = Accelerator( split_batches=a__ , dispatch_batches=a__ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = get_training_setup(a__ ) for iteration, batch in enumerate(a__ ): _UpperCamelCase , _UpperCamelCase = batch.values() # Gather the distributed inputs and targs for the base model _UpperCamelCase , _UpperCamelCase = accelerator.gather((ddp_input, ddp_target) ) _UpperCamelCase , _UpperCamelCase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(a__ , a__ , a__ , a__ , a__ ) # Do "gradient accumulation" (noop) with accelerator.accumulate(a__ ): step_model(a__ , a__ , a__ , a__ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(a__ ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})' else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) _UpperCamelCase = ddp_input[torch.randperm(len(a__ ) )] GradientState._reset_state() def lowerCAmelCase__ ( a__=False , a__=False ) ->Dict: '''simple docstring''' _UpperCamelCase = Accelerator( split_batches=a__ , dispatch_batches=a__ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = get_training_setup(a__ , a__ ) for iteration, batch in enumerate(a__ ): _UpperCamelCase , _UpperCamelCase = batch.values() # Gather the distributed inputs and targs for the base model _UpperCamelCase , _UpperCamelCase = accelerator.gather((ddp_input, ddp_target) ) _UpperCamelCase , _UpperCamelCase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(a__ , a__ , a__ , a__ , a__ ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(a__ )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(a__ ): step_model(a__ , a__ , a__ , a__ ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n' _UpperCamelCase = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(a__ )) if accelerator.num_processes > 1: check_model_parameters(a__ , a__ , a__ , a__ ) # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) GradientState._reset_state() def lowerCAmelCase__ ( ) ->Union[str, Any]: '''simple docstring''' _UpperCamelCase = Accelerator() _UpperCamelCase = RegressionDataset(length=80 ) _UpperCamelCase = DataLoader(a__ , batch_size=16 ) _UpperCamelCase = RegressionDataset(length=96 ) _UpperCamelCase = DataLoader(a__ , batch_size=16 ) _UpperCamelCase , _UpperCamelCase = accelerator.prepare(a__ , a__ ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(a__ ): assert id(accelerator.gradient_state.active_dataloader ) == id(a__ ) if iteration < len(a__ ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(a__ ): assert id(accelerator.gradient_state.active_dataloader ) == id(a__ ) if batch_num < len(a__ ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def lowerCAmelCase__ ( ) ->int: '''simple docstring''' _UpperCamelCase = Accelerator() _UpperCamelCase = accelerator.state if state.local_process_index == 0: print("**Test `accumulate` gradient accumulation with dataloader break**" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("**Test NOOP `no_sync` context manager**" ) test_noop_sync(a__ ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("**Test Distributed `no_sync` context manager**" ) test_distributed_sync(a__ ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation, " , f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , ) test_gradient_accumulation(a__ , a__ ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , ) test_gradient_accumulation_with_opt_and_scheduler(a__ , a__ ) def lowerCAmelCase__ ( a__ ) ->Tuple: '''simple docstring''' main() if __name__ == "__main__": main()
63
0
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( UniSpeechConfig, UniSpeechForCTC, UniSpeechForPreTraining, WavaVecaFeatureExtractor, WavaVecaPhonemeCTCTokenizer, WavaVecaProcessor, logging, ) logging.set_verbosity_info() lowercase_ = logging.get_logger(__name__) lowercase_ = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "ctc_proj", "mask_emb": "masked_spec_embed", } lowercase_ = [ "ctc_proj", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : Any ) -> List[str]: for attribute in key.split('''.''' ): if is_finetuned: if attribute in ["quantizer", "project_q", "project_hid"]: # those layers are only relevant for pretraining and should be dropped return if attribute == "ctc_proj": # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models __a = '''lm_head''' __a = getattr(lowerCAmelCase__ , lowerCAmelCase__ ) if weight_type is not None: __a = getattr(lowerCAmelCase__ , lowerCAmelCase__ ).shape else: __a = hf_pointer.shape assert hf_shape == value.shape, ( f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": __a = value elif weight_type == "weight_g": __a = value elif weight_type == "weight_v": __a = value elif weight_type == "bias": __a = value else: __a = value logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def lowercase ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : Tuple ) -> Union[str, Any]: __a = [] __a = fairseq_model.state_dict() __a = hf_model.unispeech.feature_extractor for name, value in fairseq_dict.items(): __a = False if "conv_layers" in name: load_conv_layer( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == '''group''' , ) __a = True else: for key, mapped_key in MAPPING.items(): __a = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: __a = True if "*" in mapped_key: __a = name.split(lowerCAmelCase__ )[0].split('''.''' )[-2] __a = mapped_key.replace('''*''' , lowerCAmelCase__ ) if "weight_g" in name: __a = '''weight_g''' elif "weight_v" in name: __a = '''weight_v''' elif "bias" in name: __a = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj __a = '''weight''' else: __a = None set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) continue if not is_used: unused_weights.append(lowerCAmelCase__ ) logger.warning(f'''Unused weights: {unused_weights}''' ) def lowercase ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str , lowerCAmelCase__ : Any , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple ) -> Dict: __a = full_name.split('''conv_layers.''' )[-1] __a = name.split('''.''' ) __a = int(items[0] ) __a = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) __a = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) __a = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." ) __a = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) __a = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(lowerCAmelCase__ ) @torch.no_grad() def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : Dict=None , lowerCAmelCase__ : List[str]=True ) -> Tuple: if config_path is not None: __a = UniSpeechConfig.from_pretrained(lowerCAmelCase__ ) else: __a = UniSpeechConfig() if is_finetuned: if dict_path: __a = Dictionary.load_from_json(lowerCAmelCase__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __a = target_dict.pad_index __a = target_dict.bos_index __a = target_dict.eos_index __a = len(target_dict.symbols ) __a = os.path.join(lowerCAmelCase__ , '''vocab.json''' ) if not os.path.isdir(lowerCAmelCase__ ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(lowerCAmelCase__ ) ) return os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ ) __a = target_dict.indices # fairseq has the <pad> and <s> switched __a = 42 __a = 43 with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(lowerCAmelCase__ , lowerCAmelCase__ ) __a = WavaVecaPhonemeCTCTokenizer( lowerCAmelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=lowerCAmelCase__ , ) __a = True if config.feat_extract_norm == '''layer''' else False __a = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , ) __a = WavaVecaProcessor(feature_extractor=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ ) processor.save_pretrained(lowerCAmelCase__ ) __a = UniSpeechForCTC(lowerCAmelCase__ ) else: __a = UniSpeechForPreTraining(lowerCAmelCase__ ) if is_finetuned: __a = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} ) else: __a = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) __a = model[0].eval() recursively_load_weights(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) hf_unispeech.save_pretrained(lowerCAmelCase__ ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) lowercase_ = parser.parse_args() convert_unispeech_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
45
import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated UpperCamelCase = collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""]) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ UpperCamelCase = """https://storage.googleapis.com/cvdf-datasets/mnist/""" def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ): A_ : Any = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' ) return numpy.frombuffer(bytestream.read(4 ) , dtype=SCREAMING_SNAKE_CASE )[0] @deprecated(SCREAMING_SNAKE_CASE , '''Please use tf.data to implement this functionality.''' ) def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ): print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=SCREAMING_SNAKE_CASE ) as bytestream: A_ : Union[str, Any] = _readaa(SCREAMING_SNAKE_CASE ) if magic != 2_051: raise ValueError( '''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) ) A_ : str = _readaa(SCREAMING_SNAKE_CASE ) A_ : Tuple = _readaa(SCREAMING_SNAKE_CASE ) A_ : Dict = _readaa(SCREAMING_SNAKE_CASE ) A_ : Tuple = bytestream.read(rows * cols * num_images ) A_ : Dict = numpy.frombuffer(SCREAMING_SNAKE_CASE , dtype=numpy.uinta ) A_ : Union[str, Any] = data.reshape(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 1 ) return data @deprecated(SCREAMING_SNAKE_CASE , '''Please use tf.one_hot on tensors.''' ) def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A_ : Optional[Any] = labels_dense.shape[0] A_ : List[str] = numpy.arange(SCREAMING_SNAKE_CASE ) * num_classes A_ : int = numpy.zeros((num_labels, num_classes) ) A_ : Tuple = 1 return labels_one_hot @deprecated(SCREAMING_SNAKE_CASE , '''Please use tf.data to implement this functionality.''' ) def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=10 ): print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=SCREAMING_SNAKE_CASE ) as bytestream: A_ : Any = _readaa(SCREAMING_SNAKE_CASE ) if magic != 2_049: raise ValueError( '''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) ) A_ : Tuple = _readaa(SCREAMING_SNAKE_CASE ) A_ : List[Any] = bytestream.read(SCREAMING_SNAKE_CASE ) A_ : int = numpy.frombuffer(SCREAMING_SNAKE_CASE , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return labels class _lowerCamelCase : """simple docstring""" @deprecated( _SCREAMING_SNAKE_CASE , '''Please use alternatives such as official/mnist/_DataSet.py''' ''' from tensorflow/models.''' , ) def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=dtypes.floataa , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , )->Tuple: '''simple docstring''' A_ , A_ : List[str] = random_seed.get_seed(_SCREAMING_SNAKE_CASE ) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda ) A_ : Tuple = dtypes.as_dtype(_SCREAMING_SNAKE_CASE ).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype ) if fake_data: A_ : Optional[Any] = 1_0000 A_ : List[str] = one_hot else: assert ( images.shape[0] == labels.shape[0] ), F'''images.shape: {images.shape} labels.shape: {labels.shape}''' A_ : List[str] = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 A_ : int = images.reshape( images.shape[0] , images.shape[1] * images.shape[2] ) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. A_ : Optional[int] = images.astype(numpy.floataa ) A_ : List[str] = numpy.multiply(_SCREAMING_SNAKE_CASE , 1.0 / 2_5_5.0 ) A_ : int = images A_ : Optional[int] = labels A_ : List[str] = 0 A_ : List[Any] = 0 @property def _snake_case ( self )->Optional[int]: '''simple docstring''' return self._images @property def _snake_case ( self )->Optional[Any]: '''simple docstring''' return self._labels @property def _snake_case ( self )->Union[str, Any]: '''simple docstring''' return self._num_examples @property def _snake_case ( self )->Union[str, Any]: '''simple docstring''' return self._epochs_completed def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True )->str: '''simple docstring''' if fake_data: A_ : Any = [1] * 784 A_ : int = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(_SCREAMING_SNAKE_CASE )], [fake_label for _ in range(_SCREAMING_SNAKE_CASE )], ) A_ : Any = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: A_ : Any = numpy.arange(self._num_examples ) numpy.random.shuffle(_SCREAMING_SNAKE_CASE ) A_ : Optional[Any] = self.images[perma] A_ : List[Any] = self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch A_ : Tuple = self._num_examples - start A_ : Union[str, Any] = self._images[start : self._num_examples] A_ : List[Any] = self._labels[start : self._num_examples] # Shuffle the data if shuffle: A_ : List[Any] = numpy.arange(self._num_examples ) numpy.random.shuffle(_SCREAMING_SNAKE_CASE ) A_ : Tuple = self.images[perm] A_ : int = self.labels[perm] # Start next epoch A_ : Tuple = 0 A_ : Optional[Any] = batch_size - rest_num_examples A_ : Tuple = self._index_in_epoch A_ : Union[str, Any] = self._images[start:end] A_ : Any = self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) , axis=0 ), numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ), ) else: self._index_in_epoch += batch_size A_ : List[str] = self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(SCREAMING_SNAKE_CASE , '''Please write your own downloading logic.''' ) def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): if not gfile.Exists(SCREAMING_SNAKE_CASE ): gfile.MakeDirs(SCREAMING_SNAKE_CASE ) A_ : str = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if not gfile.Exists(SCREAMING_SNAKE_CASE ): urllib.request.urlretrieve(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # noqa: S310 with gfile.GFile(SCREAMING_SNAKE_CASE ) as f: A_ : Dict = f.size() print('''Successfully downloaded''' , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''bytes.''' ) return filepath @deprecated( SCREAMING_SNAKE_CASE , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' ) def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=dtypes.floataa , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=5_000 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=DEFAULT_SOURCE_URL , ): if fake_data: def fake(): return _DataSet( [] , [] , fake_data=SCREAMING_SNAKE_CASE , one_hot=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE , seed=SCREAMING_SNAKE_CASE ) A_ : List[str] = fake() A_ : Tuple = fake() A_ : Union[str, Any] = fake() return _Datasets(train=SCREAMING_SNAKE_CASE , validation=SCREAMING_SNAKE_CASE , test=SCREAMING_SNAKE_CASE ) if not source_url: # empty string check A_ : List[str] = DEFAULT_SOURCE_URL A_ : List[Any] = '''train-images-idx3-ubyte.gz''' A_ : Tuple = '''train-labels-idx1-ubyte.gz''' A_ : Optional[int] = '''t10k-images-idx3-ubyte.gz''' A_ : Any = '''t10k-labels-idx1-ubyte.gz''' A_ : Dict = _maybe_download( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , source_url + train_images_file ) with gfile.Open(SCREAMING_SNAKE_CASE , '''rb''' ) as f: A_ : Optional[int] = _extract_images(SCREAMING_SNAKE_CASE ) A_ : List[str] = _maybe_download( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , source_url + train_labels_file ) with gfile.Open(SCREAMING_SNAKE_CASE , '''rb''' ) as f: A_ : Tuple = _extract_labels(SCREAMING_SNAKE_CASE , one_hot=SCREAMING_SNAKE_CASE ) A_ : Dict = _maybe_download( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , source_url + test_images_file ) with gfile.Open(SCREAMING_SNAKE_CASE , '''rb''' ) as f: A_ : List[str] = _extract_images(SCREAMING_SNAKE_CASE ) A_ : int = _maybe_download( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , source_url + test_labels_file ) with gfile.Open(SCREAMING_SNAKE_CASE , '''rb''' ) as f: A_ : Any = _extract_labels(SCREAMING_SNAKE_CASE , one_hot=SCREAMING_SNAKE_CASE ) if not 0 <= validation_size <= len(SCREAMING_SNAKE_CASE ): A_ : str = ( '''Validation size should be between 0 and ''' f'''{len(SCREAMING_SNAKE_CASE )}. Received: {validation_size}.''' ) raise ValueError(SCREAMING_SNAKE_CASE ) A_ : Optional[Any] = train_images[:validation_size] A_ : Optional[Any] = train_labels[:validation_size] A_ : Any = train_images[validation_size:] A_ : Any = train_labels[validation_size:] A_ : Optional[Any] = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed} A_ : List[str] = _DataSet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A_ : Dict = _DataSet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A_ : Dict = _DataSet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) return _Datasets(train=SCREAMING_SNAKE_CASE , validation=SCREAMING_SNAKE_CASE , test=SCREAMING_SNAKE_CASE )
186
0
import cva import numpy as np class A : def __init__(self , lowerCAmelCase , lowerCAmelCase ): if k in (0.04, 0.06): __lowercase= k __lowercase= window_size else: raise ValueError('invalid k value' ) def __str__(self ): return str(self.k ) def _A (self , lowerCAmelCase ): __lowercase= cva.imread(lowerCAmelCase , 0 ) __lowercase, __lowercase= img.shape __lowercase= [] __lowercase= img.copy() __lowercase= cva.cvtColor(lowerCAmelCase , cva.COLOR_GRAY2RGB ) __lowercase, __lowercase= np.gradient(lowerCAmelCase ) __lowercase= dx**2 __lowercase= dy**2 __lowercase= dx * dy __lowercase= 0.04 __lowercase= self.window_size // 2 for y in range(lowerCAmelCase , h - offset ): for x in range(lowerCAmelCase , w - offset ): __lowercase= ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() __lowercase= iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() __lowercase= ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() __lowercase= (wxx * wyy) - (wxy**2) __lowercase= wxx + wyy __lowercase= det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) , 0 ) color_img.itemset((y, x, 1) , 0 ) color_img.itemset((y, x, 2) , 2_5_5 ) return color_img, corner_list if __name__ == "__main__": lowerCAmelCase = HarrisCorner(0.0_4, 3) lowerCAmelCase ,lowerCAmelCase = edge_detect.detect('''path_to_image''') cva.imwrite('''detect.png''', color_img)
355
import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class A : def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=2 , lowerCAmelCase=9_9 , lowerCAmelCase=0 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=2 , lowerCAmelCase=4 , lowerCAmelCase="last" , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=0 , ): __lowercase= parent __lowercase= batch_size __lowercase= seq_length __lowercase= is_training __lowercase= use_input_lengths __lowercase= use_token_type_ids __lowercase= use_labels __lowercase= gelu_activation __lowercase= sinusoidal_embeddings __lowercase= causal __lowercase= asm __lowercase= n_langs __lowercase= vocab_size __lowercase= n_special __lowercase= hidden_size __lowercase= num_hidden_layers __lowercase= num_attention_heads __lowercase= hidden_dropout_prob __lowercase= attention_probs_dropout_prob __lowercase= max_position_embeddings __lowercase= type_sequence_label_size __lowercase= initializer_range __lowercase= num_labels __lowercase= num_choices __lowercase= summary_type __lowercase= use_proj __lowercase= scope __lowercase= bos_token_id def _A (self ): __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase= random_attention_mask([self.batch_size, self.seq_length] ) __lowercase= None if self.use_input_lengths: __lowercase= ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length __lowercase= None if self.use_token_type_ids: __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) __lowercase= None __lowercase= None __lowercase= None if self.use_labels: __lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase= ids_tensor([self.batch_size] , 2 ).float() __lowercase= ids_tensor([self.batch_size] , self.num_choices ) __lowercase= self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _A (self ): return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= XLMModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , lengths=lowerCAmelCase , langs=lowerCAmelCase ) __lowercase= model(lowerCAmelCase , langs=lowerCAmelCase ) __lowercase= model(lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= XLMWithLMHeadModel(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= XLMForQuestionAnsweringSimple(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase ) __lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase ) __lowercase= outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= XLMForQuestionAnswering(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase ) __lowercase= model( lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , p_mask=lowerCAmelCase , ) __lowercase= model( lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , ) ((__lowercase), )= result_with_labels.to_tuple() __lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase ) ((__lowercase), )= result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= XLMForSequenceClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase ) __lowercase= model(lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= self.num_labels __lowercase= XLMForTokenClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= self.num_choices __lowercase= XLMForMultipleChoice(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase= model( lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _A (self ): __lowercase= self.prepare_config_and_inputs() ( ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), )= config_and_inputs __lowercase= {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths} return config, inputs_dict @require_torch class A ( A_ , A_ , A_ , unittest.TestCase ): UpperCamelCase_ : int =( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) UpperCamelCase_ : Dict =( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable UpperCamelCase_ : str =( { '''feature-extraction''': XLMModel, '''fill-mask''': XLMWithLMHeadModel, '''question-answering''': XLMForQuestionAnsweringSimple, '''text-classification''': XLMForSequenceClassification, '''text-generation''': XLMWithLMHeadModel, '''token-classification''': XLMForTokenClassification, '''zero-shot''': XLMForSequenceClassification, } if is_torch_available() else {} ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ): __lowercase= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": __lowercase= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase ) __lowercase= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase ) return inputs_dict def _A (self ): __lowercase= XLMModelTester(self ) __lowercase= ConfigTester(self , config_class=lowerCAmelCase , emb_dim=3_7 ) def _A (self ): self.config_tester.run_common_tests() def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ): self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) self.assertListEqual( [isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase ) ) self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(lowerCAmelCase ): # adds PAD dummy token __lowercase= min_length + idx + 1 __lowercase= min_length + idx + 1 __lowercase= ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase ) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ): self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) self.assertListEqual( [isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase ) , ) self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(lowerCAmelCase ): # adds PAD dummy token __lowercase= min_length + idx + 1 __lowercase= (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase ) , ) pass @slow def _A (self ): for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase= XLMModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) @require_torch class A ( unittest.TestCase ): @slow def _A (self ): __lowercase= XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' ) model.to(lowerCAmelCase ) __lowercase= torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=lowerCAmelCase ) # the president __lowercase= [ 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference __lowercase= model.generate(lowerCAmelCase , do_sample=lowerCAmelCase ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase )
304
0
'''simple docstring''' import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.testing_utils import require_tensorflow_text, require_tf, slow if is_tf_available(): import tensorflow as tf if is_tensorflow_text_available(): from transformers.models.bert import TFBertTokenizer UpperCAmelCase_ : Any = ['bert-base-uncased', 'bert-base-cased'] UpperCAmelCase_ : Tuple = 'hf-internal-testing/tiny-bert-tf-only' if is_tf_available(): class lowercase__ ( tf.keras.Model ): '''simple docstring''' def __init__( self , __snake_case ): super().__init__() _SCREAMING_SNAKE_CASE : List[str] = tokenizer _SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(lowerCamelCase__ ) _SCREAMING_SNAKE_CASE : Tuple = TFAutoModel.from_config(lowerCamelCase__ ) def UpperCAmelCase_ ( self , __snake_case ): _SCREAMING_SNAKE_CASE : List[str] = self.tokenizer(lowerCamelCase__ ) _SCREAMING_SNAKE_CASE : str = self.bert(**lowerCamelCase__ ) return out["pooler_output"] @require_tf @require_tensorflow_text class lowercase__ ( unittest.TestCase ): '''simple docstring''' def UpperCAmelCase_ ( self ): super().setUp() _SCREAMING_SNAKE_CASE : Dict = [ BertTokenizer.from_pretrained(lowerCamelCase__ ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2) ] # repeat for when fast_bert_tokenizer=false _SCREAMING_SNAKE_CASE : Any = [TFBertTokenizer.from_pretrained(lowerCamelCase__ ) for checkpoint in TOKENIZER_CHECKPOINTS] + [ TFBertTokenizer.from_pretrained(lowerCamelCase__ , use_fast_bert_tokenizer=lowerCamelCase__ ) for checkpoint in TOKENIZER_CHECKPOINTS ] assert len(self.tokenizers ) == len(self.tf_tokenizers ) _SCREAMING_SNAKE_CASE : Union[str, Any] = [ """This is a straightforward English test sentence.""", """This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""", """Now we're going to add some Chinese: 一 二 三 一二三""", """And some much more rare Chinese: 齉 堃 齉堃""", """Je vais aussi écrire en français pour tester les accents""", """Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""", ] _SCREAMING_SNAKE_CASE : int = list(zip(self.test_sentences , self.test_sentences[::-1] ) ) def UpperCAmelCase_ ( self ): for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ): for test_inputs in (self.test_sentences, self.paired_sentences): _SCREAMING_SNAKE_CASE : int = tokenizer(lowerCamelCase__ , return_tensors="""tf""" , padding="""longest""" ) _SCREAMING_SNAKE_CASE : Optional[int] = tf_tokenizer(lowerCamelCase__ ) for key in python_outputs.keys(): self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) ) self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) ) @slow def UpperCAmelCase_ ( self ): for tf_tokenizer in self.tf_tokenizers: _SCREAMING_SNAKE_CASE : Union[str, Any] = tf_tokenizer(self.paired_sentences ) _SCREAMING_SNAKE_CASE : Optional[int] = tf_tokenizer( text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , ) for key in merged_outputs.keys(): self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) ) @slow def UpperCAmelCase_ ( self ): for tf_tokenizer in self.tf_tokenizers: _SCREAMING_SNAKE_CASE : int = tf.function(lowerCamelCase__ ) for test_inputs in (self.test_sentences, self.paired_sentences): _SCREAMING_SNAKE_CASE : List[str] = tf.constant(lowerCamelCase__ ) _SCREAMING_SNAKE_CASE : str = compiled_tokenizer(lowerCamelCase__ ) _SCREAMING_SNAKE_CASE : Optional[int] = tf_tokenizer(lowerCamelCase__ ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def UpperCAmelCase_ ( self ): for tf_tokenizer in self.tf_tokenizers: _SCREAMING_SNAKE_CASE : List[Any] = ModelToSave(tokenizer=lowerCamelCase__ ) _SCREAMING_SNAKE_CASE : int = tf.convert_to_tensor(self.test_sentences ) _SCREAMING_SNAKE_CASE : str = model(lowerCamelCase__ ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: _SCREAMING_SNAKE_CASE : Optional[int] = Path(lowerCamelCase__ ) / """saved.model""" model.save(lowerCamelCase__ ) _SCREAMING_SNAKE_CASE : Optional[int] = tf.keras.models.load_model(lowerCamelCase__ ) _SCREAMING_SNAKE_CASE : List[str] = loaded_model(lowerCamelCase__ ) # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1e-5 )
200
'''simple docstring''' from typing import List, Union import numpy as np from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, logging from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline __A = logging.get_logger(__name__) class A ( __UpperCAmelCase ): def A__ ( self , lowerCamelCase__ ) -> List[str]: '''simple docstring''' if isinstance(lowerCamelCase__ , lowerCamelCase__ ): lowercase__ = [label.strip() for label in labels.split(""",""" ) if label.strip()] return labels def __call__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any: '''simple docstring''' if len(lowerCamelCase__ ) == 0 or len(lowerCamelCase__ ) == 0: raise ValueError("""You must include at least one label and at least one sequence.""" ) if hypothesis_template.format(labels[0] ) == hypothesis_template: raise ValueError( ( """The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """ """Make sure the passed template includes formatting syntax such as {{}} where the label should go.""" ).format(lowerCamelCase__ ) ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ): lowercase__ = [sequences] lowercase__ = [] for sequence in sequences: sequence_pairs.extend([[sequence, hypothesis_template.format(lowerCamelCase__ )] for label in labels] ) return sequence_pairs, sequences @add_end_docstrings(__UpperCAmelCase ) class A ( __UpperCAmelCase ): def __init__( self , lowerCamelCase__=ZeroShotClassificationArgumentHandler() , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]: '''simple docstring''' lowercase__ = args_parser super().__init__(*lowerCamelCase__ , **lowerCamelCase__ ) if self.entailment_id == -1: logger.warning( """Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """ """-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" ) @property def A__ ( self ) -> int: '''simple docstring''' for label, ind in self.model.config.labelaid.items(): if label.lower().startswith("""entail""" ): return ind return -1 def A__ ( self , lowerCamelCase__ , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=TruncationStrategy.ONLY_FIRST , **lowerCamelCase__ ) -> int: '''simple docstring''' lowercase__ = self.framework if self.tokenizer.pad_token is None: # Override for tokenizers not supporting padding logger.error( """Tokenizer was not supporting padding necessary for zero-shot, attempting to use """ """ `pad_token=eos_token`""" ) lowercase__ = self.tokenizer.eos_token try: lowercase__ = self.tokenizer( lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , ) except Exception as e: if "too short" in str(lowerCamelCase__ ): # tokenizers might yell that we want to truncate # to a value that is not even reached by the input. # In that case we don't want to truncate. # It seems there's not a really better way to catch that # exception. lowercase__ = self.tokenizer( lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=TruncationStrategy.DO_NOT_TRUNCATE , ) else: raise e return inputs def A__ ( self , **lowerCamelCase__ ) -> Dict: '''simple docstring''' if kwargs.get("""multi_class""" , lowerCamelCase__ ) is not None: lowercase__ = kwargs["""multi_class"""] logger.warning( """The `multi_class` argument has been deprecated and renamed to `multi_label`. """ """`multi_class` will be removed in a future version of Transformers.""" ) lowercase__ = {} if "candidate_labels" in kwargs: lowercase__ = self._args_parser._parse_labels(kwargs["""candidate_labels"""] ) if "hypothesis_template" in kwargs: lowercase__ = kwargs["""hypothesis_template"""] lowercase__ = {} if "multi_label" in kwargs: lowercase__ = kwargs["""multi_label"""] return preprocess_params, {}, postprocess_params def __call__( self , lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ , ) -> Optional[int]: '''simple docstring''' if len(lowerCamelCase__ ) == 0: pass elif len(lowerCamelCase__ ) == 1 and "candidate_labels" not in kwargs: lowercase__ = args[0] else: raise ValueError(F'''Unable to understand extra arguments {args}''' ) return super().__call__(lowerCamelCase__ , **lowerCamelCase__ ) def A__ ( self , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__="This example is {}." ) -> Optional[Any]: '''simple docstring''' lowercase__ , lowercase__ = self._args_parser(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) for i, (candidate_label, sequence_pair) in enumerate(zip(lowerCamelCase__ , lowerCamelCase__ ) ): lowercase__ = self._parse_and_tokenize([sequence_pair] ) yield { "candidate_label": candidate_label, "sequence": sequences[0], "is_last": i == len(lowerCamelCase__ ) - 1, **model_input, } def A__ ( self , lowerCamelCase__ ) -> Optional[int]: '''simple docstring''' lowercase__ = inputs["""candidate_label"""] lowercase__ = inputs["""sequence"""] lowercase__ = {k: inputs[k] for k in self.tokenizer.model_input_names} lowercase__ = self.model(**lowerCamelCase__ ) lowercase__ = { """candidate_label""": candidate_label, """sequence""": sequence, """is_last""": inputs["""is_last"""], **outputs, } return model_outputs def A__ ( self , lowerCamelCase__ , lowerCamelCase__=False ) -> int: '''simple docstring''' lowercase__ = [outputs["""candidate_label"""] for outputs in model_outputs] lowercase__ = [outputs["""sequence"""] for outputs in model_outputs] lowercase__ = np.concatenate([output["""logits"""].numpy() for output in model_outputs] ) lowercase__ = logits.shape[0] lowercase__ = len(lowerCamelCase__ ) lowercase__ = N // n lowercase__ = logits.reshape((num_sequences, n, -1) ) if multi_label or len(lowerCamelCase__ ) == 1: # softmax over the entailment vs. contradiction dim for each label independently lowercase__ = self.entailment_id lowercase__ = -1 if entailment_id == 0 else 0 lowercase__ = reshaped_outputs[..., [contradiction_id, entailment_id]] lowercase__ = np.exp(lowerCamelCase__ ) / np.exp(lowerCamelCase__ ).sum(-1 , keepdims=lowerCamelCase__ ) lowercase__ = scores[..., 1] else: # softmax the "entailment" logits over all candidate labels lowercase__ = reshaped_outputs[..., self.entailment_id] lowercase__ = np.exp(lowerCamelCase__ ) / np.exp(lowerCamelCase__ ).sum(-1 , keepdims=lowerCamelCase__ ) lowercase__ = list(reversed(scores[0].argsort() ) ) return { "sequence": sequences[0], "labels": [candidate_labels[i] for i in top_inds], "scores": scores[0, top_inds].tolist(), }
164
0
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser from accelerate.commands.config import get_config_parser from accelerate.commands.env import env_command_parser from accelerate.commands.launch import launch_command_parser from accelerate.commands.test import test_command_parser from accelerate.commands.tpu import tpu_command_parser def UpperCAmelCase__ ( ): lowercase :List[Any] = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=lowerCamelCase ) lowercase :int = parser.add_subparsers(help="accelerate command helpers" ) # Register commands get_config_parser(subparsers=lowerCamelCase ) env_command_parser(subparsers=lowerCamelCase ) launch_command_parser(subparsers=lowerCamelCase ) tpu_command_parser(subparsers=lowerCamelCase ) test_command_parser(subparsers=lowerCamelCase ) # Let's go lowercase :Optional[int] = parser.parse_args() if not hasattr(lowerCamelCase, "func" ): parser.print_help() exit(1 ) # Run args.func(lowerCamelCase ) if __name__ == "__main__": main()
158
import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed _UpperCAmelCase : Optional[Any] = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(f'''{bindir}/../../examples/pytorch/translation'''): from run_translation import main # noqa set_seed(42) _UpperCAmelCase : Union[str, Any] = "sshleifer/student_marian_en_ro_6_1" _UpperCAmelCase : Any = "sshleifer/tiny-mbart" @require_torch class __lowerCAmelCase ( lowerCAmelCase): def SCREAMING_SNAKE_CASE ( self: int , _lowerCAmelCase: int=False , _lowerCAmelCase: str=None , _lowerCAmelCase: Dict=True , _lowerCAmelCase: Dict=True , _lowerCAmelCase: Optional[int]=True , _lowerCAmelCase: Union[str, Any]=True , ): lowercase :Any = self.run_trainer( eval_steps=1 , max_len=12 , model_name=_lowerCAmelCase , num_train_epochs=1 , distributed=_lowerCAmelCase , extra_args_str=_lowerCAmelCase , predict_with_generate=_lowerCAmelCase , do_train=_lowerCAmelCase , do_eval=_lowerCAmelCase , do_predict=_lowerCAmelCase , ) lowercase :List[Any] = TrainerState.load_from_json(os.path.join(_lowerCAmelCase , "trainer_state.json" ) ).log_history if not do_eval: return lowercase :Union[str, Any] = [log for log in logs if "eval_loss" in log.keys()] lowercase :Any = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats lowercase :Optional[Any] = eval_metrics[-1] assert isinstance(last_step_stats["eval_bleu"] , _lowerCAmelCase ) assert not math.isnan(float(last_step_stats["eval_loss"] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def SCREAMING_SNAKE_CASE ( self: List[Any] ): self.run_seqaseq_quick() @require_torch_multi_gpu def SCREAMING_SNAKE_CASE ( self: str ): self.run_seqaseq_quick(distributed=_lowerCAmelCase ) @require_torch_multi_gpu def SCREAMING_SNAKE_CASE ( self: Tuple ): self.run_seqaseq_quick(distributed=_lowerCAmelCase ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE ( self: Optional[Any] ): self.run_seqaseq_quick(distributed=_lowerCAmelCase , extra_args_str="--sharded_ddp simple" ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE ( self: Optional[int] ): self.run_seqaseq_quick(distributed=_lowerCAmelCase , extra_args_str="--sharded_ddp simple --fp16" ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE ( self: Dict ): self.run_seqaseq_quick(distributed=_lowerCAmelCase , extra_args_str="--sharded_ddp zero_dp_2" , predict_with_generate=_lowerCAmelCase ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE ( self: Optional[Any] ): self.run_seqaseq_quick( distributed=_lowerCAmelCase , extra_args_str="--sharded_ddp zero_dp_2 --fp16" , predict_with_generate=_lowerCAmelCase ) @require_apex @require_torch_gpu def SCREAMING_SNAKE_CASE ( self: List[Any] ): # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same # program and it breaks other tests that run from the same pytest worker, therefore until this is # sorted out it must be run only in an external program, that is distributed=True in this # test and only under one or more gpus - if we want cpu will need to make a special test # # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via # 2nd main() call it botches the future eval. # self.run_seqaseq_quick(distributed=_lowerCAmelCase , extra_args_str="--fp16 --fp16_backend=apex" ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=_lowerCAmelCase , extra_args_str="--fp16 --fp16_backend=apex" ) @parameterized.expand(["base", "low", "high", "mixed"] ) @require_torch_multi_gpu def SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _lowerCAmelCase: Any ): # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout lowercase :List[Any] = { # test with the default log_level - should be info and thus log info once "base": {"extra_args_str": "", "n_matches": 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1}, # test with high log_level and log_level_replica - should be quiet on all processes "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0}, } lowercase :str = experiments[experiment_id] lowercase :Dict = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False} lowercase :List[str] = "Running training" with CaptureStderr() as cl: self.run_seqaseq_quick(**_lowerCAmelCase , extra_args_str=data["extra_args_str"] ) lowercase :Dict = len(re.findall(_lowerCAmelCase , cl.err ) ) self.assertEqual(_lowerCAmelCase , data["n_matches"] ) @slow def SCREAMING_SNAKE_CASE ( self: List[str] ): lowercase :Dict = self.run_trainer( eval_steps=2 , max_len=1_28 , model_name=_lowerCAmelCase , learning_rate=3e-4 , num_train_epochs=10 , distributed=_lowerCAmelCase , ) # Check metrics lowercase :List[str] = TrainerState.load_from_json(os.path.join(_lowerCAmelCase , "trainer_state.json" ) ).log_history lowercase :Dict = [log for log in logs if "eval_loss" in log.keys()] lowercase :str = eval_metrics[0] lowercase :Optional[int] = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats["eval_bleu"] , _lowerCAmelCase ) # test if do_predict saves generations and metrics lowercase :Optional[Any] = os.listdir(_lowerCAmelCase ) lowercase :List[str] = {os.path.basename(_lowerCAmelCase ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def SCREAMING_SNAKE_CASE ( self: Tuple ): from transformers.training_args import OptimizerNames def train_and_return_metrics(_lowerCAmelCase: str ) -> Tuple[int, float]: lowercase :Tuple = "--skip_memory_metrics 0" lowercase :List[str] = self.run_trainer( max_len=1_28 , model_name=_lowerCAmelCase , learning_rate=3e-4 , num_train_epochs=1 , optim=_lowerCAmelCase , distributed=_lowerCAmelCase , extra_args_str=_lowerCAmelCase , do_eval=_lowerCAmelCase , do_predict=_lowerCAmelCase , n_gpus_to_use=1 , ) # Check metrics lowercase :List[str] = TrainerState.load_from_json(Path(_lowerCAmelCase , "trainer_state.json" ) ).log_history lowercase :Dict = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20 ) lowercase :Any = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20 ) lowercase :List[str] = logs[0]["train_loss"] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss lowercase , lowercase , lowercase :Optional[Any] = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) lowercase , lowercase , lowercase :List[str] = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) lowercase :List[Any] = gpu_alloc_mem_orig - gpu_alloc_mem_bnb lowercase :List[str] = gpu_peak_mem_orig + gpu_alloc_mem_orig lowercase :List[str] = gpu_peak_mem_bnb + gpu_alloc_mem_bnb lowercase :Tuple = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings lowercase :Union[str, Any] = 1_20 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( _lowerCAmelCase , _lowerCAmelCase , "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got" F" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and" F" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , ) self.assertGreater( _lowerCAmelCase , _lowerCAmelCase , "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got" F" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and" F" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , ) self.assertEqual( _lowerCAmelCase , _lowerCAmelCase , F"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}" ) def SCREAMING_SNAKE_CASE ( self: Dict , _lowerCAmelCase: int , _lowerCAmelCase: str , _lowerCAmelCase: int , _lowerCAmelCase: float = 3e-3 , _lowerCAmelCase: str = "adafactor" , _lowerCAmelCase: bool = False , _lowerCAmelCase: str = None , _lowerCAmelCase: int = 0 , _lowerCAmelCase: bool = True , _lowerCAmelCase: bool = True , _lowerCAmelCase: bool = True , _lowerCAmelCase: bool = True , _lowerCAmelCase: int = None , ): lowercase :Optional[int] = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro" lowercase :Optional[Any] = self.get_auto_remove_tmp_dir() lowercase :Tuple = F"\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(_lowerCAmelCase )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(_lowerCAmelCase )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split() lowercase :Union[str, Any] = F"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(_lowerCAmelCase )}\n ".split() lowercase :str = "\n --do_predict\n ".split() lowercase :Union[str, Any] = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += F"--optim {optim}".split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: lowercase :Optional[int] = get_gpu_count() lowercase :str = get_torch_dist_unique_port() lowercase :Union[str, Any] = F"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split() lowercase :Optional[int] = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(_lowerCAmelCase , env=self.get_env() ) else: lowercase :Tuple = ["run_translation.py"] + args with patch.object(_lowerCAmelCase , "argv" , _lowerCAmelCase ): main() return output_dir
158
1
"""simple docstring""" # Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position __a = "2.13.1" import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse("3.7"): raise ImportWarning( "To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition." ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( "To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n" "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`." ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # isort:skip from datasets.utils import download_manager as _deprecated_download_manager # isort:skip __a = concatenate_datasets __a = DownloadConfig __a = DownloadManager __a = DownloadMode __a = DownloadConfig __a = DownloadMode __a = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
66
"""simple docstring""" import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCamelCase : '''simple docstring''' def __init__( self: Optional[int] , snake_case: Any , snake_case: Optional[Any]=13 , snake_case: Tuple=32 , snake_case: Optional[int]=2 , snake_case: Tuple=3 , snake_case: Tuple=16 , snake_case: Optional[Any]=[1, 2, 1] , snake_case: Optional[int]=[2, 2, 4] , snake_case: Optional[int]=2 , snake_case: int=2.0 , snake_case: Union[str, Any]=True , snake_case: List[str]=0.0 , snake_case: List[Any]=0.0 , snake_case: Optional[Any]=0.1 , snake_case: List[Any]="gelu" , snake_case: Optional[int]=False , snake_case: Union[str, Any]=True , snake_case: Union[str, Any]=0.0_2 , snake_case: Optional[int]=1E-5 , snake_case: Optional[Any]=True , snake_case: List[Any]=None , snake_case: List[Any]=True , snake_case: Optional[Any]=10 , snake_case: str=8 , ) -> Tuple: snake_case_ :Dict = parent snake_case_ :Any = batch_size snake_case_ :List[Any] = image_size snake_case_ :List[Any] = patch_size snake_case_ :int = num_channels snake_case_ :Tuple = embed_dim snake_case_ :str = depths snake_case_ :str = num_heads snake_case_ :Optional[int] = window_size snake_case_ :Tuple = mlp_ratio snake_case_ :Any = qkv_bias snake_case_ :List[Any] = hidden_dropout_prob snake_case_ :Optional[Any] = attention_probs_dropout_prob snake_case_ :Union[str, Any] = drop_path_rate snake_case_ :Any = hidden_act snake_case_ :Optional[Any] = use_absolute_embeddings snake_case_ :Union[str, Any] = patch_norm snake_case_ :Dict = layer_norm_eps snake_case_ :str = initializer_range snake_case_ :Tuple = is_training snake_case_ :Tuple = scope snake_case_ :Union[str, Any] = use_labels snake_case_ :Optional[Any] = type_sequence_label_size snake_case_ :Dict = encoder_stride def lowerCAmelCase_ ( self: int ) -> int: snake_case_ :List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ :Any = None if self.use_labels: snake_case_ :str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ :int = self.get_config() return config, pixel_values, labels def lowerCAmelCase_ ( self: str ) -> Union[str, Any]: return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def lowerCAmelCase_ ( self: str , snake_case: Optional[int] , snake_case: Dict , snake_case: str ) -> List[Any]: snake_case_ :Union[str, Any] = SwinvaModel(config=snake_case ) model.to(snake_case ) model.eval() snake_case_ :Optional[int] = model(snake_case ) snake_case_ :Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) snake_case_ :int = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def lowerCAmelCase_ ( self: int , snake_case: List[str] , snake_case: Tuple , snake_case: int ) -> Any: snake_case_ :Dict = SwinvaForMaskedImageModeling(config=snake_case ) model.to(snake_case ) model.eval() snake_case_ :Tuple = model(snake_case ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images snake_case_ :List[Any] = 1 snake_case_ :int = SwinvaForMaskedImageModeling(snake_case ) model.to(snake_case ) model.eval() snake_case_ :Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case_ :int = model(snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def lowerCAmelCase_ ( self: List[Any] , snake_case: Any , snake_case: List[str] , snake_case: Union[str, Any] ) -> Tuple: snake_case_ :int = self.type_sequence_label_size snake_case_ :List[Any] = SwinvaForImageClassification(snake_case ) model.to(snake_case ) model.eval() snake_case_ :Dict = model(snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCAmelCase_ ( self: int ) -> str: snake_case_ :Any = self.prepare_config_and_inputs() snake_case_, snake_case_, snake_case_ :List[str] = config_and_inputs snake_case_ :List[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _A : Optional[Any] = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) _A : Any = ( {"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification} if is_torch_available() else {} ) _A : List[Any] = False _A : List[str] = False _A : Tuple = False _A : List[str] = False def lowerCAmelCase_ ( self: Dict ) -> List[Any]: snake_case_ :Optional[int] = SwinvaModelTester(self ) snake_case_ :List[str] = ConfigTester(self , config_class=snake_case , embed_dim=37 ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[Any]: self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCAmelCase_ ( self: Union[str, Any] ) -> Tuple: snake_case_ :List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) @unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> str: pass @unittest.skip(reason="""Swinv2 does not use inputs_embeds""" ) def lowerCAmelCase_ ( self: int ) -> Dict: pass def lowerCAmelCase_ ( self: List[str] ) -> Union[str, Any]: snake_case_, snake_case_ :List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ :Optional[int] = model_class(snake_case ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) snake_case_ :List[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) ) def lowerCAmelCase_ ( self: Dict ) -> Optional[int]: snake_case_, snake_case_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ :Optional[int] = model_class(snake_case ) snake_case_ :List[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ :int = [*signature.parameters.keys()] snake_case_ :List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , snake_case ) def lowerCAmelCase_ ( self: List[str] ) -> Optional[Any]: snake_case_, snake_case_ :List[str] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :List[str] = True for model_class in self.all_model_classes: snake_case_ :List[Any] = True snake_case_ :Any = False snake_case_ :Optional[int] = True snake_case_ :Tuple = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): snake_case_ :Any = model(**self._prepare_for_class(snake_case , snake_case ) ) snake_case_ :str = outputs.attentions snake_case_ :Dict = len(self.model_tester.depths ) self.assertEqual(len(snake_case ) , snake_case ) # check that output_attentions also work using config del inputs_dict["output_attentions"] snake_case_ :Union[str, Any] = True snake_case_ :Tuple = config.window_size**2 snake_case_ :Any = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): snake_case_ :Union[str, Any] = model(**self._prepare_for_class(snake_case , snake_case ) ) snake_case_ :int = outputs.attentions self.assertEqual(len(snake_case ) , snake_case ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) snake_case_ :Any = len(snake_case ) # Check attention is always last and order is fine snake_case_ :int = True snake_case_ :Dict = True snake_case_ :Optional[int] = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): snake_case_ :Dict = model(**self._prepare_for_class(snake_case , snake_case ) ) if hasattr(self.model_tester , """num_hidden_states_types""" ): snake_case_ :Any = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states snake_case_ :int = 2 self.assertEqual(out_len + added_hidden_states , len(snake_case ) ) snake_case_ :str = outputs.attentions self.assertEqual(len(snake_case ) , snake_case ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) def lowerCAmelCase_ ( self: int , snake_case: Dict , snake_case: Dict , snake_case: Optional[Any] , snake_case: Dict ) -> List[str]: snake_case_ :Dict = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): snake_case_ :Optional[int] = model(**self._prepare_for_class(snake_case , snake_case ) ) snake_case_ :str = outputs.hidden_states snake_case_ :List[Any] = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(snake_case ) , snake_case ) # Swinv2 has a different seq_length snake_case_ :List[Any] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case_ :Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) snake_case_ :str = outputs.reshaped_hidden_states self.assertEqual(len(snake_case ) , snake_case ) snake_case_, snake_case_, snake_case_, snake_case_ :Any = reshaped_hidden_states[0].shape snake_case_ :int = ( reshaped_hidden_states[0].view(snake_case , snake_case , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def lowerCAmelCase_ ( self: Any ) -> Any: snake_case_, snake_case_ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :Union[str, Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: snake_case_ :Union[str, Any] = True self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ :List[str] = True self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case ) def lowerCAmelCase_ ( self: Tuple ) -> Any: snake_case_, snake_case_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :Optional[int] = 3 snake_case_ :Union[str, Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) snake_case_ :str = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case_ :Any = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) snake_case_ :int = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: snake_case_ :str = True self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ :Tuple = True self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) ) def lowerCAmelCase_ ( self: Any ) -> Tuple: snake_case_ :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*snake_case ) def lowerCAmelCase_ ( self: Optional[int] ) -> Dict: snake_case_ :Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case ) @slow def lowerCAmelCase_ ( self: List[Any] ) -> Dict: for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ :List[str] = SwinvaModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def lowerCAmelCase_ ( self: Optional[int] ) -> List[Any]: snake_case_, snake_case_ :str = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :Optional[int] = _config_zero_init(snake_case ) for model_class in self.all_model_classes: snake_case_ :Tuple = model_class(config=snake_case ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @require_vision @require_torch class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase_ ( self: Optional[int] ) -> List[Any]: return ( AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ) if is_vision_available() else None ) @slow def lowerCAmelCase_ ( self: List[str] ) -> List[str]: snake_case_ :Tuple = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to( snake_case ) snake_case_ :str = self.default_image_processor snake_case_ :List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) snake_case_ :str = image_processor(images=snake_case , return_tensors="""pt""" ).to(snake_case ) # forward pass with torch.no_grad(): snake_case_ :Tuple = model(**snake_case ) # verify the logits snake_case_ :Dict = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , snake_case ) snake_case_ :int = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
66
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ = logging.get_logger(__name__) lowercase__ = { 'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json', } class __snake_case ( __lowerCAmelCase ): a__ = """gpt_bigcode""" a__ = ["""past_key_values"""] a__ = { """hidden_size""": """n_embd""", """max_position_embeddings""": """n_positions""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , lowercase=5_02_57 , lowercase=10_24 , lowercase=7_68 , lowercase=12 , lowercase=12 , lowercase=None , lowercase="gelu_pytorch_tanh" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=1e-5 , lowercase=0.02 , lowercase=True , lowercase=True , lowercase=5_02_56 , lowercase=5_02_56 , lowercase=True , lowercase=True , lowercase=True , **lowercase , ) -> Dict: '''simple docstring''' a__: str = vocab_size a__: Optional[int] = n_positions a__: str = n_embd a__: List[Any] = n_layer a__: Optional[Any] = n_head a__: str = n_inner a__: Optional[int] = activation_function a__: str = resid_pdrop a__: List[str] = embd_pdrop a__: int = attn_pdrop a__: Union[str, Any] = layer_norm_epsilon a__: int = initializer_range a__: List[str] = scale_attn_weights a__: str = use_cache a__: str = attention_softmax_in_fpaa a__: Optional[int] = scale_attention_softmax_in_fpaa a__: Optional[int] = multi_query a__: Union[str, Any] = bos_token_id a__: str = eos_token_id super().__init__(bos_token_id=lowercase , eos_token_id=lowercase , **lowercase)
203
"""simple docstring""" from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING lowercase__ = logging.get_logger(__name__) lowercase__ = Dict[str, Any] lowercase__ = List[Prediction] @add_end_docstrings(__lowerCAmelCase ) class __snake_case ( __lowerCAmelCase ): def __init__( self , *lowercase , **lowercase) -> Dict: '''simple docstring''' super().__init__(*lowercase , **lowercase) if self.framework == "tf": raise ValueError(f'The {self.__class__} is only available in PyTorch.') requires_backends(self , 'vision') self.check_model_type( dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())) def lowerCamelCase_ ( self , **lowercase) -> int: '''simple docstring''' a__: Optional[Any] = {} if "threshold" in kwargs: a__: Dict = kwargs['threshold'] return {}, {}, postprocess_kwargs def __call__( self , *lowercase , **lowercase) -> Union[Predictions, List[Prediction]]: '''simple docstring''' return super().__call__(*lowercase , **lowercase) def lowerCamelCase_ ( self , lowercase) -> List[Any]: '''simple docstring''' a__: Optional[Any] = load_image(lowercase) a__: List[Any] = torch.IntTensor([[image.height, image.width]]) a__: Any = self.image_processor(images=[image] , return_tensors='pt') if self.tokenizer is not None: a__: Any = self.tokenizer(text=inputs['words'] , boxes=inputs['boxes'] , return_tensors='pt') a__: List[str] = target_size return inputs def lowerCamelCase_ ( self , lowercase) -> int: '''simple docstring''' a__: Any = model_inputs.pop('target_size') a__: Union[str, Any] = self.model(**lowercase) a__: List[str] = outputs.__class__({'target_size': target_size, **outputs}) if self.tokenizer is not None: a__: Union[str, Any] = model_inputs['bbox'] return model_outputs def lowerCamelCase_ ( self , lowercase , lowercase=0.9) -> Optional[Any]: '''simple docstring''' a__: int = model_outputs['target_size'] if self.tokenizer is not None: # This is a LayoutLMForTokenClassification variant. # The OCR got the boxes and the model classified the words. a__ , a__: str = target_size[0].tolist() def unnormalize(lowercase): return self._get_bounding_box( torch.Tensor( [ (width * bbox[0] / 10_00), (height * bbox[1] / 10_00), (width * bbox[2] / 10_00), (height * bbox[3] / 10_00), ])) a__ , a__: Optional[Any] = model_outputs['logits'].squeeze(0).softmax(dim=-1).max(dim=-1) a__: str = [self.model.config.idalabel[prediction] for prediction in classes.tolist()] a__: Union[str, Any] = [unnormalize(lowercase) for bbox in model_outputs['bbox'].squeeze(0)] a__: Dict = ['score', 'label', 'box'] a__: Any = [dict(zip(lowercase , lowercase)) for vals in zip(scores.tolist() , lowercase , lowercase) if vals[0] > threshold] else: # This is a regular ForObjectDetectionModel a__: List[str] = self.image_processor.post_process_object_detection(lowercase , lowercase , lowercase) a__: Tuple = raw_annotations[0] a__: List[str] = raw_annotation['scores'] a__: int = raw_annotation['labels'] a__: int = raw_annotation['boxes'] a__: List[Any] = scores.tolist() a__: Any = [self.model.config.idalabel[label.item()] for label in labels] a__: Dict = [self._get_bounding_box(lowercase) for box in boxes] # {"scores": [...], ...} --> [{"score":x, ...}, ...] a__: Optional[Any] = ['score', 'label', 'box'] a__: List[Any] = [ dict(zip(lowercase , lowercase)) for vals in zip(raw_annotation['scores'] , raw_annotation['labels'] , raw_annotation['boxes']) ] return annotation def lowerCamelCase_ ( self , lowercase) -> Dict[str, int]: '''simple docstring''' if self.framework != "pt": raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.') a__ , a__ , a__ , a__: List[Any] = box.int().tolist() a__: Optional[int] = { 'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax, } return bbox
203
1
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { 'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json', # See all Nat models at https://huggingface.co/models?filter=nat } class A__ ( A__ , A__ ): lowerCAmelCase__ : Union[str, Any] = "nat" lowerCAmelCase__ : Any = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self : int , _UpperCAmelCase : List[str]=4 , _UpperCAmelCase : Optional[Any]=3 , _UpperCAmelCase : List[Any]=64 , _UpperCAmelCase : Union[str, Any]=[3, 4, 6, 5] , _UpperCAmelCase : str=[2, 4, 8, 16] , _UpperCAmelCase : Union[str, Any]=7 , _UpperCAmelCase : Tuple=3.0 , _UpperCAmelCase : str=True , _UpperCAmelCase : Optional[Any]=0.0 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Union[str, Any]="gelu" , _UpperCAmelCase : str=0.02 , _UpperCAmelCase : str=1e-5 , _UpperCAmelCase : List[str]=0.0 , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : int=None , **_UpperCAmelCase : Dict , ) -> int: """simple docstring""" super().__init__(**_lowerCAmelCase ) __lowercase = patch_size __lowercase = num_channels __lowercase = embed_dim __lowercase = depths __lowercase = len(_lowerCAmelCase ) __lowercase = num_heads __lowercase = kernel_size __lowercase = mlp_ratio __lowercase = qkv_bias __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = drop_path_rate __lowercase = hidden_act __lowercase = layer_norm_eps __lowercase = initializer_range # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __lowercase = int(embed_dim * 2 ** (len(_lowerCAmelCase ) - 1) ) __lowercase = layer_scale_init_value __lowercase = ['stem'] + [f"""stage{idx}""" for idx in range(1 , len(_lowerCAmelCase ) + 1 )] __lowercase , __lowercase = get_aligned_output_features_output_indices( out_features=_lowerCAmelCase , out_indices=_lowerCAmelCase , stage_names=self.stage_names )
325
'''simple docstring''' from collections import Counter from timeit import timeit def __a ( UpperCAmelCase = "" , ) ->bool: """simple docstring""" return sum(c % 2 for c in Counter(input_str.replace(""" """ , """""" ).lower() ).values() ) < 2 def __a ( UpperCAmelCase = "" ) ->bool: """simple docstring""" if len(UpperCAmelCase ) == 0: return True A = input_str.replace(""" """ , """""" ).lower() # character_freq_dict: Stores the frequency of every character in the input string A = {} for character in lower_case_input_str: A = character_freq_dict.get(UpperCAmelCase , 0 ) + 1 A = 0 for character_count in character_freq_dict.values(): if character_count % 2: odd_char += 1 if odd_char > 1: return False return True def __a ( UpperCAmelCase = "" ) ->None: """simple docstring""" print("""\nFor string = """ , UpperCAmelCase , """:""" ) print( """> can_string_be_rearranged_as_palindrome_counter()""" , """\tans =""" , can_string_be_rearranged_as_palindrome_counter(UpperCAmelCase ) , """\ttime =""" , timeit( """z.can_string_be_rearranged_as_palindrome_counter(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , ) print( """> can_string_be_rearranged_as_palindrome()""" , """\tans =""" , can_string_be_rearranged_as_palindrome(UpperCAmelCase ) , """\ttime =""" , timeit( """z.can_string_be_rearranged_as_palindrome(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , ) if __name__ == "__main__": _lowerCamelCase : Any = input( 'Enter string to determine if it can be rearranged as a palindrome or not: ' ).strip() benchmark(check_str) _lowerCamelCase : Any = can_string_be_rearranged_as_palindrome_counter(check_str) print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
258
0
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaInpaintPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _UpperCAmelCase ( lowerCAmelCase_ , unittest.TestCase ): a : str =KandinskyVaaInpaintPipeline a : int =["""image_embeds""", """negative_image_embeds""", """image""", """mask_image"""] a : str =[ """image_embeds""", """negative_image_embeds""", """image""", """mask_image""", ] a : Optional[int] =[ """generator""", """height""", """width""", """latents""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] a : Dict =False @property def lowerCamelCase__ ( self ): '''simple docstring''' return 32 @property def lowerCamelCase__ ( self ): '''simple docstring''' return 32 @property def lowerCamelCase__ ( self ): '''simple docstring''' return self.time_input_dim @property def lowerCamelCase__ ( self ): '''simple docstring''' return self.time_input_dim * 4 @property def lowerCamelCase__ ( self ): '''simple docstring''' return 1_00 @property def lowerCamelCase__ ( self ): '''simple docstring''' torch.manual_seed(0 ) __lowerCAmelCase = { """in_channels""": 9, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } __lowerCAmelCase = UNetaDConditionModel(**__SCREAMING_SNAKE_CASE ) return model @property def lowerCamelCase__ ( self ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def lowerCamelCase__ ( self ): '''simple docstring''' torch.manual_seed(0 ) __lowerCAmelCase = VQModel(**self.dummy_movq_kwargs ) return model def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = self.dummy_unet __lowerCAmelCase = self.dummy_movq __lowerCAmelCase = DDIMScheduler( num_train_timesteps=10_00,beta_schedule="""linear""",beta_start=0.0_0085,beta_end=0.012,clip_sample=__SCREAMING_SNAKE_CASE,set_alpha_to_one=__SCREAMING_SNAKE_CASE,steps_offset=1,prediction_type="""epsilon""",thresholding=__SCREAMING_SNAKE_CASE,) __lowerCAmelCase = { """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=0 ): '''simple docstring''' __lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size),rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size),rng=random.Random(seed + 1 ) ).to( __SCREAMING_SNAKE_CASE ) # create init_image __lowerCAmelCase = floats_tensor((1, 3, 64, 64),rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = image.cpu().permute(0,2,3,1 )[0] __lowerCAmelCase = Image.fromarray(np.uinta(__SCREAMING_SNAKE_CASE ) ).convert("""RGB""" ).resize((2_56, 2_56) ) # create mask __lowerCAmelCase = np.ones((64, 64),dtype=np.floataa ) __lowerCAmelCase = 0 if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ): __lowerCAmelCase = torch.manual_seed(__SCREAMING_SNAKE_CASE ) else: __lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = { """image""": init_image, """mask_image""": mask, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 2, """guidance_scale""": 4.0, """output_type""": """np""", } return inputs def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = """cpu""" __lowerCAmelCase = self.get_dummy_components() __lowerCAmelCase = self.pipeline_class(**__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = pipe(**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) ) __lowerCAmelCase = output.images __lowerCAmelCase = pipe( **self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ),return_dict=__SCREAMING_SNAKE_CASE,)[0] __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1] print(f'image.shape {image.shape}' ) assert image.shape == (1, 64, 64, 3) __lowerCAmelCase = np.array( [0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}' def lowerCamelCase__ ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): def lowerCamelCase__ ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" ) __lowerCAmelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) __lowerCAmelCase = np.ones((7_68, 7_68),dtype=np.floataa ) __lowerCAmelCase = 0 __lowerCAmelCase = """a hat""" __lowerCAmelCase = KandinskyVaaPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-prior""",torch_dtype=torch.floataa ) pipe_prior.to(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = KandinskyVaaInpaintPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-decoder-inpaint""",torch_dtype=torch.floataa ) __lowerCAmelCase = pipeline.to(__SCREAMING_SNAKE_CASE ) pipeline.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) __lowerCAmelCase , __lowerCAmelCase = pipe_prior( __SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=5,negative_prompt="""""",).to_tuple() __lowerCAmelCase = pipeline( image=__SCREAMING_SNAKE_CASE,mask_image=__SCREAMING_SNAKE_CASE,image_embeds=__SCREAMING_SNAKE_CASE,negative_image_embeds=__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=1_00,height=7_68,width=7_68,output_type="""np""",) __lowerCAmelCase = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
46
'''simple docstring''' from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def _lowerCAmelCase ( lowercase , lowercase , lowercase = False ) -> list[float]: if radian_mode: return [magnitude * cos(lowercase ), magnitude * sin(lowercase )] return [magnitude * cos(radians(lowercase ) ), magnitude * sin(radians(lowercase ) )] def _lowerCAmelCase ( lowercase , lowercase , lowercase = 10**-1 ) -> bool: __lowerCAmelCase = cross(lowercase , lowercase ) __lowerCAmelCase = sum(lowercase ) return abs(lowercase ) < eps if __name__ == "__main__": # Test to check if it works _a : Any = array( [ polar_force(718.4, 1_8_0 - 3_0), polar_force(879.54, 4_5), polar_force(1_0_0, -9_0), ] ) _a : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg _a : List[Any] = array( [ polar_force(3_0 * 9.81, 1_5), polar_force(2_1_5, 1_8_0 - 4_5), polar_force(2_6_4, 9_0 - 3_0), ] ) _a : Optional[int] = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg _a : Union[str, Any] = array([[0, -2_0_0_0], [0, -1_2_0_0], [0, 1_5_6_0_0], [0, -1_2_4_0_0]]) _a : Optional[int] = array([[0, 0], [6, 0], [1_0, 0], [1_2, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
46
1
"""simple docstring""" import os # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_doctest_list.py lowercase_ = "." if __name__ == "__main__": lowercase_ = os.path.join(REPO_PATH, "utils/documentation_tests.txt") lowercase_ = [] lowercase_ = [] with open(doctest_file_path) as fp: for line in fp: lowercase_ = line.strip() lowercase_ = os.path.join(REPO_PATH, line) if not (os.path.isfile(path) or os.path.isdir(path)): non_existent_paths.append(line) all_paths.append(path) if len(non_existent_paths) > 0: lowercase_ = "\n".join(non_existent_paths) raise ValueError(F'''`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}''') if all_paths != sorted(all_paths): raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
45
"""simple docstring""" __magic_name__ = "Tobias Carryer" from time import time class SCREAMING_SNAKE_CASE_ : """simple docstring""" def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=int(time())): # noqa: B008 __SCREAMING_SNAKE_CASE = multiplier __SCREAMING_SNAKE_CASE = increment __SCREAMING_SNAKE_CASE = modulo __SCREAMING_SNAKE_CASE = seed def snake_case_ ( self): __SCREAMING_SNAKE_CASE = (self.multiplier * self.seed + self.increment) % self.modulo return self.seed if __name__ == "__main__": # Show the LCG in action. __magic_name__ = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31) while True: print(lcg.next_number())
100
0
"""simple docstring""" from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
100
"""simple docstring""" from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
100
1
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, Features, Value from .base import TaskTemplate @dataclass(frozen=snake_case ) class a__ ( snake_case ): """simple docstring""" __lowerCamelCase = field(default='automatic-speech-recognition' , metadata={'include_in_asdict_even_if_is_default': True} ) __lowerCamelCase = Features({'audio': Audio()} ) __lowerCamelCase = Features({'transcription': Value('string' )} ) __lowerCamelCase = "audio" __lowerCamelCase = "transcription" def UpperCamelCase ( self , lowercase ) -> Union[str, Any]: '''simple docstring''' if self.audio_column not in features: raise ValueError(F'Column {self.audio_column} is not present in features.' ) if not isinstance(features[self.audio_column] , lowercase ): raise ValueError(F'Column {self.audio_column} is not an Audio type.' ) A__ = copy.deepcopy(self ) A__ = self.input_schema.copy() A__ = features[self.audio_column] A__ = input_schema return task_template @property def UpperCamelCase ( self ) -> Dict[str, str]: '''simple docstring''' return {self.audio_column: "audio", self.transcription_column: "transcription"}
68
'''simple docstring''' import unittest from knapsack import greedy_knapsack as kp class lowerCAmelCase__ ( unittest.TestCase ): def _snake_case ( self ): """simple docstring""" lowercase_ : List[str] = [10, 20, 30, 40, 50, 60] lowercase_ : Optional[Any] = [2, 4, 6, 8, 10, 12] lowercase_ : Union[str, Any] = 1_00 self.assertEqual(kp.calc_profit(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , 2_10 ) def _snake_case ( self ): """simple docstring""" self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , '''max_weight must greater than zero.''' ) def _snake_case ( self ): """simple docstring""" self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , '''Weight can not be negative.''' ) def _snake_case ( self ): """simple docstring""" self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , '''Profit can not be negative.''' ) def _snake_case ( self ): """simple docstring""" self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , '''max_weight must greater than zero.''' ) def _snake_case ( self ): """simple docstring""" self.assertRaisesRegex( __SCREAMING_SNAKE_CASE , '''The length of profit and weight must be same.''' ) if __name__ == "__main__": unittest.main()
93
0
from __future__ import annotations def __UpperCamelCase ( _A : Optional[int] , _A : str , _A : Tuple ) ->List[str]: """simple docstring""" lowerCamelCase_ =list(range(len(lowercase__ ) ) ) lowerCamelCase_ =[v / w for v, w in zip(lowercase__ , lowercase__ )] index.sort(key=lambda _A : ratio[i] , reverse=lowercase__ ) lowerCamelCase_ =0 lowerCamelCase_ =[0] * len(lowercase__ ) for i in index: if weight[i] <= capacity: lowerCamelCase_ =1 max_value += value[i] capacity -= weight[i] else: lowerCamelCase_ =capacity / weight[i] max_value += value[i] * capacity / weight[i] break return max_value, fractions if __name__ == "__main__": import doctest doctest.testmod()
362
import math def __UpperCamelCase ( _A : int = 100 ) ->int: """simple docstring""" lowerCamelCase_ =sum(i * i for i in range(1 , n + 1 ) ) lowerCamelCase_ =int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) ) return square_of_sum - sum_of_squares if __name__ == "__main__": print(F"""{solution() = }""")
49
0
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing the experiment tracking capability, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __snake_case : Optional[int] =1_6 __snake_case : Optional[int] =3_2 def lowerCAmelCase__ ( lowerCamelCase_ : Accelerator ,lowerCamelCase_ : int = 16): '''simple docstring''' lowerCAmelCase__ : int = AutoTokenizer.from_pretrained('''bert-base-cased''') lowerCAmelCase__ : List[str] = load_dataset('''glue''' ,'''mrpc''') def tokenize_function(lowerCamelCase_ : List[Any]): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase__ : List[str] = tokenizer(examples['''sentence1'''] ,examples['''sentence2'''] ,truncation=lowerCamelCase_ ,max_length=lowerCamelCase_) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowerCAmelCase__ : Union[str, Any] = datasets.map( lowerCamelCase_ ,batched=lowerCamelCase_ ,remove_columns=['''idx''', '''sentence1''', '''sentence2'''] ,) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCAmelCase__ : str = tokenized_datasets.rename_column('''label''' ,'''labels''') def collate_fn(lowerCamelCase_ : List[Any]): # On TPU it's best to pad everything to the same length or training will be very slow. lowerCAmelCase__ : Optional[Any] = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowerCAmelCase__ : List[Any] = 16 elif accelerator.mixed_precision != "no": lowerCAmelCase__ : int = 8 else: lowerCAmelCase__ : Any = None return tokenizer.pad( lowerCamelCase_ ,padding='''longest''' ,max_length=lowerCamelCase_ ,pad_to_multiple_of=lowerCamelCase_ ,return_tensors='''pt''' ,) # Instantiate dataloaders. lowerCAmelCase__ : str = DataLoader( tokenized_datasets['''train'''] ,shuffle=lowerCamelCase_ ,collate_fn=lowerCamelCase_ ,batch_size=lowerCamelCase_) lowerCAmelCase__ : Tuple = DataLoader( tokenized_datasets['''validation'''] ,shuffle=lowerCamelCase_ ,collate_fn=lowerCamelCase_ ,batch_size=lowerCamelCase_) return train_dataloader, eval_dataloader # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1": from accelerate.test_utils.training import mocked_dataloaders __snake_case : List[str] =mocked_dataloaders # noqa: F811 def lowerCAmelCase__ ( lowerCamelCase_ : List[Any] ,lowerCamelCase_ : List[Any]): '''simple docstring''' if os.environ.get('''TESTING_MOCKED_DATALOADERS''' ,lowerCamelCase_) == "1": lowerCAmelCase__ : Dict = 2 # Initialize Accelerator # New Code # # We pass in "all" to `log_with` to grab all available trackers in the environment # Note: If using a custom `Tracker` class, should be passed in here such as: # >>> log_with = ["all", MyCustomTrackerClassInstance()] if args.with_tracking: lowerCAmelCase__ : Any = Accelerator( cpu=args.cpu ,mixed_precision=args.mixed_precision ,log_with='''all''' ,project_dir=args.project_dir) else: lowerCAmelCase__ : Optional[Any] = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCAmelCase__ : str = config['''lr'''] lowerCAmelCase__ : Tuple = int(config['''num_epochs''']) lowerCAmelCase__ : Union[str, Any] = int(config['''seed''']) lowerCAmelCase__ : Optional[Any] = int(config['''batch_size''']) set_seed(lowerCamelCase_) lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = get_dataloaders(lowerCamelCase_ ,lowerCamelCase_) lowerCAmelCase__ : Tuple = evaluate.load('''glue''' ,'''mrpc''') # If the batch size is too big we use gradient accumulation lowerCAmelCase__ : Any = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: lowerCAmelCase__ : List[Any] = batch_size // MAX_GPU_BATCH_SIZE lowerCAmelCase__ : List[Any] = MAX_GPU_BATCH_SIZE # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCAmelCase__ : List[Any] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' ,return_dict=lowerCamelCase_) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowerCAmelCase__ : Optional[Any] = model.to(accelerator.device) # Instantiate optimizer lowerCAmelCase__ : Dict = AdamW(params=model.parameters() ,lr=lowerCamelCase_) # Instantiate scheduler lowerCAmelCase__ : Optional[int] = get_linear_schedule_with_warmup( optimizer=lowerCamelCase_ ,num_warmup_steps=100 ,num_training_steps=(len(lowerCamelCase_) * num_epochs) // gradient_accumulation_steps ,) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Tuple = accelerator.prepare( lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_) # New Code # # We need to initialize the trackers we use. Overall configurations can also be stored if args.with_tracking: lowerCAmelCase__ : int = os.path.split(lowerCamelCase_)[-1].split('''.''')[0] accelerator.init_trackers(lowerCamelCase_ ,lowerCamelCase_) # Now we train the model for epoch in range(lowerCamelCase_): model.train() # New Code # # For our tracking example, we will log the total loss of each epoch if args.with_tracking: lowerCAmelCase__ : Optional[Any] = 0 for step, batch in enumerate(lowerCamelCase_): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) lowerCAmelCase__ : Optional[int] = model(**lowerCamelCase_) lowerCAmelCase__ : Optional[Any] = outputs.loss # New Code # if args.with_tracking: total_loss += loss.detach().float() lowerCAmelCase__ : Optional[int] = loss / gradient_accumulation_steps accelerator.backward(lowerCamelCase_) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowerCamelCase_): # We could avoid this line since we set the accelerator with `device_placement=True` (the default). batch.to(accelerator.device) with torch.no_grad(): lowerCAmelCase__ : Optional[Any] = model(**lowerCamelCase_) lowerCAmelCase__ : Dict = outputs.logits.argmax(dim=-1) lowerCAmelCase__ , lowerCAmelCase__ : int = accelerator.gather_for_metrics((predictions, batch['''labels'''])) metric.add_batch( predictions=lowerCamelCase_ ,references=lowerCamelCase_ ,) lowerCAmelCase__ : Dict = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" ,lowerCamelCase_) # New Code # # To actually log, we call `Accelerator.log` # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int` if args.with_tracking: accelerator.log( { '''accuracy''': eval_metric['''accuracy'''], '''f1''': eval_metric['''f1'''], '''train_loss''': total_loss.item() / len(lowerCamelCase_), '''epoch''': epoch, } ,step=lowerCamelCase_ ,) # New Code # # When a run is finished, you should call `accelerator.end_training()` # to close all of the open trackers if args.with_tracking: accelerator.end_training() def lowerCAmelCase__ ( ): '''simple docstring''' lowerCAmelCase__ : List[str] = argparse.ArgumentParser(description='''Simple example of training script.''') parser.add_argument( '''--mixed_precision''' ,type=lowerCamelCase_ ,default=lowerCamelCase_ ,choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] ,help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' ,) parser.add_argument('''--cpu''' ,action='''store_true''' ,help='''If passed, will train on the CPU.''') parser.add_argument( '''--with_tracking''' ,action='''store_true''' ,help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' ,) parser.add_argument( '''--project_dir''' ,type=lowerCamelCase_ ,default='''logs''' ,help='''Location on where to store experiment tracking logs` and relevent project information''' ,) lowerCAmelCase__ : str = parser.parse_args() lowerCAmelCase__ : List[str] = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(lowerCamelCase_ ,lowerCamelCase_) if __name__ == "__main__": main()
129
import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def lowerCAmelCase__ ( lowerCamelCase_ : Any): '''simple docstring''' if "img_encoder.pos_embed" in name: lowerCAmelCase__ : Dict = name.replace('''img_encoder.pos_embed''' ,'''vision_model.embeddings.position_embeddings''') if "img_encoder.patch_embed.proj" in name: lowerCAmelCase__ : int = name.replace('''img_encoder.patch_embed.proj''' ,'''vision_model.embeddings.patch_embeddings.projection''') if "img_encoder.patch_embed.norm" in name: lowerCAmelCase__ : Optional[int] = name.replace('''img_encoder.patch_embed.norm''' ,'''vision_model.embeddings.layernorm''') if "img_encoder.layers" in name: lowerCAmelCase__ : Tuple = name.replace('''img_encoder.layers''' ,'''vision_model.encoder.stages''') if "blocks" in name and "res" not in name: lowerCAmelCase__ : Dict = name.replace('''blocks''' ,'''layers''') if "attn" in name and "pre_assign" not in name: lowerCAmelCase__ : Optional[int] = name.replace('''attn''' ,'''self_attn''') if "proj" in name and "self_attn" in name and "text" not in name: lowerCAmelCase__ : Union[str, Any] = name.replace('''proj''' ,'''out_proj''') if "pre_assign_attn.attn.proj" in name: lowerCAmelCase__ : List[Any] = name.replace('''pre_assign_attn.attn.proj''' ,'''pre_assign_attn.attn.out_proj''') if "norm1" in name: lowerCAmelCase__ : Union[str, Any] = name.replace('''norm1''' ,'''layer_norm1''') if "norm2" in name and "pre_assign" not in name: lowerCAmelCase__ : int = name.replace('''norm2''' ,'''layer_norm2''') if "img_encoder.norm" in name: lowerCAmelCase__ : List[Any] = name.replace('''img_encoder.norm''' ,'''vision_model.layernorm''') # text encoder if "text_encoder.token_embedding" in name: lowerCAmelCase__ : List[Any] = name.replace('''text_encoder.token_embedding''' ,'''text_model.embeddings.token_embedding''') if "text_encoder.positional_embedding" in name: lowerCAmelCase__ : Tuple = name.replace('''text_encoder.positional_embedding''' ,'''text_model.embeddings.position_embedding.weight''') if "text_encoder.transformer.resblocks." in name: lowerCAmelCase__ : Union[str, Any] = name.replace('''text_encoder.transformer.resblocks.''' ,'''text_model.encoder.layers.''') if "ln_1" in name: lowerCAmelCase__ : Union[str, Any] = name.replace('''ln_1''' ,'''layer_norm1''') if "ln_2" in name: lowerCAmelCase__ : Union[str, Any] = name.replace('''ln_2''' ,'''layer_norm2''') if "c_fc" in name: lowerCAmelCase__ : Optional[Any] = name.replace('''c_fc''' ,'''fc1''') if "c_proj" in name: lowerCAmelCase__ : List[str] = name.replace('''c_proj''' ,'''fc2''') if "text_encoder" in name: lowerCAmelCase__ : str = name.replace('''text_encoder''' ,'''text_model''') if "ln_final" in name: lowerCAmelCase__ : Union[str, Any] = name.replace('''ln_final''' ,'''final_layer_norm''') # projection layers if "img_projector.linear_hidden." in name: lowerCAmelCase__ : Tuple = name.replace('''img_projector.linear_hidden.''' ,'''visual_projection.''') if "img_projector.linear_out." in name: lowerCAmelCase__ : Optional[Any] = name.replace('''img_projector.linear_out.''' ,'''visual_projection.3.''') if "text_projector.linear_hidden" in name: lowerCAmelCase__ : Tuple = name.replace('''text_projector.linear_hidden''' ,'''text_projection''') if "text_projector.linear_out" in name: lowerCAmelCase__ : Dict = name.replace('''text_projector.linear_out''' ,'''text_projection.3''') return name def lowerCAmelCase__ ( lowerCamelCase_ : Optional[Any] ,lowerCamelCase_ : List[str]): '''simple docstring''' for key in orig_state_dict.copy().keys(): lowerCAmelCase__ : List[str] = orig_state_dict.pop(lowerCamelCase_) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors lowerCAmelCase__ : Tuple = key.split('''.''') lowerCAmelCase__ , lowerCAmelCase__ : List[str] = int(key_split[2]), int(key_split[4]) lowerCAmelCase__ : Any = config.vision_config.hidden_size if "weight" in key: lowerCAmelCase__ : Tuple = val[:dim, :] lowerCAmelCase__ : Dict = val[dim : dim * 2, :] lowerCAmelCase__ : List[str] = val[-dim:, :] else: lowerCAmelCase__ : List[Any] = val[:dim] lowerCAmelCase__ : List[str] = val[dim : dim * 2] lowerCAmelCase__ : Tuple = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors lowerCAmelCase__ : Dict = key.split('''.''') lowerCAmelCase__ : List[str] = int(key_split[3]) lowerCAmelCase__ : Any = config.text_config.hidden_size if "weight" in key: lowerCAmelCase__ : Tuple = val[:dim, :] lowerCAmelCase__ : Union[str, Any] = val[ dim : dim * 2, : ] lowerCAmelCase__ : List[Any] = val[-dim:, :] else: lowerCAmelCase__ : Union[str, Any] = val[:dim] lowerCAmelCase__ : List[str] = val[dim : dim * 2] lowerCAmelCase__ : str = val[-dim:] else: lowerCAmelCase__ : int = rename_key(lowerCamelCase_) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): lowerCAmelCase__ : Dict = val.squeeze_() else: lowerCAmelCase__ : Tuple = val return orig_state_dict def lowerCAmelCase__ ( ): '''simple docstring''' lowerCAmelCase__ : Dict = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCAmelCase__ : str = Image.open(requests.get(lowerCamelCase_ ,stream=lowerCamelCase_).raw) return im @torch.no_grad() def lowerCAmelCase__ ( lowerCamelCase_ : str ,lowerCamelCase_ : int ,lowerCamelCase_ : Tuple="groupvit-gcc-yfcc" ,lowerCamelCase_ : int=False): '''simple docstring''' lowerCAmelCase__ : Dict = GroupViTConfig() lowerCAmelCase__ : Dict = GroupViTModel(lowerCamelCase_).eval() lowerCAmelCase__ : Optional[int] = torch.load(lowerCamelCase_ ,map_location='''cpu''')['''model'''] lowerCAmelCase__ : List[Any] = convert_state_dict(lowerCamelCase_ ,lowerCamelCase_) lowerCAmelCase__ , lowerCAmelCase__ : Any = model.load_state_dict(lowerCamelCase_ ,strict=lowerCamelCase_) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowerCamelCase_) == 0) # verify result lowerCAmelCase__ : Optional[Any] = CLIPProcessor.from_pretrained('''openai/clip-vit-base-patch32''') lowerCAmelCase__ : Tuple = prepare_img() lowerCAmelCase__ : Dict = processor(text=['''a photo of a cat''', '''a photo of a dog'''] ,images=lowerCamelCase_ ,padding=lowerCamelCase_ ,return_tensors='''pt''') with torch.no_grad(): lowerCAmelCase__ : str = model(**lowerCamelCase_) if model_name == "groupvit-gcc-yfcc": lowerCAmelCase__ : Union[str, Any] = torch.tensor([[13.3523, 6.3629]]) elif model_name == "groupvit-gcc-redcaps": lowerCAmelCase__ : Tuple = torch.tensor([[16.1873, 8.6230]]) else: raise ValueError(f"""Model name {model_name} not supported.""") assert torch.allclose(outputs.logits_per_image ,lowerCamelCase_ ,atol=1E-3) processor.save_pretrained(lowerCamelCase_) model.save_pretrained(lowerCamelCase_) print('''Successfully saved processor and model to''' ,lowerCamelCase_) if push_to_hub: print('''Pushing to the hub...''') processor.push_to_hub(lowerCamelCase_ ,organization='''nielsr''') model.push_to_hub(lowerCamelCase_ ,organization='''nielsr''') if __name__ == "__main__": __snake_case : int =argparse.ArgumentParser() parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.' ) parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint') parser.add_argument( '--model_name', default='groupvit-gccy-fcc', type=str, help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'', ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.', ) __snake_case : Tuple =parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
129
1
import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() lowercase : List[str] = logging.get_logger(__name__) def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Tuple: lowercase : Tuple = SwinConfig.from_pretrained( """microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) lowercase : str = MaskFormerConfig(backbone_config=SCREAMING_SNAKE_CASE__ ) lowercase : Dict = """huggingface/label-files""" if "ade20k-full" in model_name: # this should be ok lowercase : List[Any] = 847 lowercase : List[Any] = """maskformer-ade20k-full-id2label.json""" elif "ade" in model_name: # this should be ok lowercase : Optional[Any] = 150 lowercase : int = """ade20k-id2label.json""" elif "coco-stuff" in model_name: # this should be ok lowercase : int = 171 lowercase : Tuple = """maskformer-coco-stuff-id2label.json""" elif "coco" in model_name: # TODO lowercase : Optional[int] = 133 lowercase : List[Any] = """coco-panoptic-id2label.json""" elif "cityscapes" in model_name: # this should be ok lowercase : int = 19 lowercase : str = """cityscapes-id2label.json""" elif "vistas" in model_name: # this should be ok lowercase : Optional[Any] = 65 lowercase : Tuple = """mapillary-vistas-id2label.json""" lowercase : str = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" ) , """r""" ) ) lowercase : Any = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()} return config def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Optional[Any]: lowercase : Tuple = [] # stem # fmt: off rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm1.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") ) rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm1.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") ) rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") ) rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.relative_position_index", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") ) rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.proj.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") ) rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.proj.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") ) rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm2.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") ) rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm2.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") ) rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc1.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") ) rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc1.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") ) rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc2.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") ) rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc2.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") ) if i < 3: rename_keys.append((f"backbone.layers.{i}.downsample.reduction.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight") ) rename_keys.append((f"backbone.layers.{i}.downsample.norm.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight") ) rename_keys.append((f"backbone.layers.{i}.downsample.norm.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias") ) rename_keys.append((f"backbone.norm{i}.weight", f"model.pixel_level_module.encoder.hidden_states_norms.{i}.weight") ) rename_keys.append((f"backbone.norm{i}.bias", f"model.pixel_level_module.encoder.hidden_states_norms.{i}.bias") ) # FPN rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") ) rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") ) rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") ) for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ): rename_keys.append((f"sem_seg_head.adapter_{source_index}.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight") ) rename_keys.append((f"sem_seg_head.adapter_{source_index}.norm.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight") ) rename_keys.append((f"sem_seg_head.adapter_{source_index}.norm.bias", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias") ) rename_keys.append((f"sem_seg_head.layer_{source_index}.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight") ) rename_keys.append((f"sem_seg_head.layer_{source_index}.norm.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight") ) rename_keys.append((f"sem_seg_head.layer_{source_index}.norm.bias", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias") ) rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") ) rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") ) # Transformer decoder for idx in range(config.decoder_config.decoder_layers ): # self-attention out projection rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight", f"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight") ) rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias", f"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias") ) # cross-attention out projection rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight", f"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight") ) rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias", f"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias") ) # MLP 1 rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight", f"model.transformer_module.decoder.layers.{idx}.fc1.weight") ) rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias", f"model.transformer_module.decoder.layers.{idx}.fc1.bias") ) # MLP 2 rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight", f"model.transformer_module.decoder.layers.{idx}.fc2.weight") ) rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias", f"model.transformer_module.decoder.layers.{idx}.fc2.bias") ) # layernorm 1 (self-attention layernorm) rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight", f"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight") ) rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias", f"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias") ) # layernorm 2 (cross-attention layernorm) rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight", f"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight") ) rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias", f"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias") ) # layernorm 3 (final layernorm) rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight", f"model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight") ) rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias", f"model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias") ) rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") ) rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") ) # heads on top rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") ) rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") ) rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") ) rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") ) rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") ) for i in range(3 ): rename_keys.append((f"sem_seg_head.predictor.mask_embed.layers.{i}.weight", f"mask_embedder.{i}.0.weight") ) rename_keys.append((f"sem_seg_head.predictor.mask_embed.layers.{i}.bias", f"mask_embedder.{i}.0.bias") ) # fmt: on return rename_keys def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[int]: lowercase : List[Any] = dct.pop(SCREAMING_SNAKE_CASE__ ) lowercase : str = val def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Dict: lowercase : List[str] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): lowercase : str = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) lowercase : Dict = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight" ) lowercase : str = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict lowercase : Union[str, Any] = in_proj_weight[:dim, :] lowercase : str = in_proj_bias[: dim] lowercase : Union[str, Any] = in_proj_weight[ dim : dim * 2, : ] lowercase : Any = in_proj_bias[ dim : dim * 2 ] lowercase : Optional[Any] = in_proj_weight[ -dim :, : ] lowercase : Any = in_proj_bias[-dim :] # fmt: on def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Tuple: # fmt: off lowercase : Dict = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) lowercase : Dict = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight" ) lowercase : Dict = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias" ) # next, add query, keys and values (in that order) to the state dict lowercase : str = in_proj_weight[: hidden_size, :] lowercase : Union[str, Any] = in_proj_bias[:config.hidden_size] lowercase : Tuple = in_proj_weight[hidden_size : hidden_size * 2, :] lowercase : Dict = in_proj_bias[hidden_size : hidden_size * 2] lowercase : Any = in_proj_weight[-hidden_size :, :] lowercase : Optional[int] = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) lowercase : Union[str, Any] = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight" ) lowercase : Dict = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias" ) # next, add query, keys and values (in that order) to the state dict lowercase : List[Any] = in_proj_weight[: hidden_size, :] lowercase : List[str] = in_proj_bias[:config.hidden_size] lowercase : str = in_proj_weight[hidden_size : hidden_size * 2, :] lowercase : Dict = in_proj_bias[hidden_size : hidden_size * 2] lowercase : Optional[Any] = in_proj_weight[-hidden_size :, :] lowercase : Union[str, Any] = in_proj_bias[-hidden_size :] # fmt: on def _snake_case( ) -> torch.Tensor: lowercase : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowercase : Optional[int] = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw ) return im @torch.no_grad() def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = False ) -> Tuple: lowercase : int = get_maskformer_config(SCREAMING_SNAKE_CASE__ ) # load original state_dict with open(SCREAMING_SNAKE_CASE__ , """rb""" ) as f: lowercase : Any = pickle.load(SCREAMING_SNAKE_CASE__ ) lowercase : Optional[Any] = data["""model"""] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys lowercase : Union[str, Any] = create_rename_keys(SCREAMING_SNAKE_CASE__ ) for src, dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) read_in_swin_q_k_v(SCREAMING_SNAKE_CASE__ , config.backbone_config ) read_in_decoder_q_k_v(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # update to torch tensors for key, value in state_dict.items(): lowercase : str = torch.from_numpy(SCREAMING_SNAKE_CASE__ ) # load 🤗 model lowercase : Union[str, Any] = MaskFormerForInstanceSegmentation(SCREAMING_SNAKE_CASE__ ) model.eval() for name, param in model.named_parameters(): print(SCREAMING_SNAKE_CASE__ , param.shape ) lowercase , lowercase : Optional[int] = model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(SCREAMING_SNAKE_CASE__ ) == 0, f"Unexpected keys: {unexpected_keys}" # verify results lowercase : str = prepare_img() if "vistas" in model_name: lowercase : int = 65 elif "cityscapes" in model_name: lowercase : Dict = 65_535 else: lowercase : List[Any] = 255 lowercase : Optional[Any] = True if """ade""" in model_name else False lowercase : Any = MaskFormerImageProcessor(ignore_index=SCREAMING_SNAKE_CASE__ , reduce_labels=SCREAMING_SNAKE_CASE__ ) lowercase : Union[str, Any] = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ) lowercase : List[Any] = model(**SCREAMING_SNAKE_CASE__ ) print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": lowercase : str = torch.tensor( [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f"Saving model and image processor to {pytorch_dump_folder_path}" ) Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ ) model.save_pretrained(SCREAMING_SNAKE_CASE__ ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ ) if push_to_hub: print("""Pushing model and image processor to the hub...""" ) model.push_to_hub(f"nielsr/{model_name}" ) image_processor.push_to_hub(f"nielsr/{model_name}" ) if __name__ == "__main__": lowercase : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""maskformer-swin-tiny-ade""", type=str, help=("""Name of the MaskFormer model you'd like to convert""",), ) parser.add_argument( """--checkpoint_path""", default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""", type=str, help="""Path to the original state dict (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) lowercase : Dict = parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
285
import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger lowercase : Tuple = get_logger(__name__) lowercase : Optional[int] = Path(__file__).parent / """model_card_template.md""" lowercase : Dict = uuida().hex lowercase : Tuple = os.getenv("""HF_HUB_OFFLINE""", """""").upper() in ENV_VARS_TRUE_VALUES lowercase : str = os.getenv("""DISABLE_TELEMETRY""", """""").upper() in ENV_VARS_TRUE_VALUES lowercase : Tuple = HUGGINGFACE_CO_RESOLVE_ENDPOINT + """/api/telemetry/""" def _snake_case( SCREAMING_SNAKE_CASE__ = None ) -> str: lowercase : str = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}" if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f"; torch/{_torch_version}" if is_flax_available(): ua += f"; jax/{_jax_version}" ua += f"; flax/{_flax_version}" if is_onnx_available(): ua += f"; onnxruntime/{_onnxruntime_version}" # CI will set this value to True if os.environ.get("""DIFFUSERS_IS_CI""" , """""" ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items() ) elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): ua += "; " + user_agent return ua def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None ) -> Dict: if token is None: lowercase : Optional[int] = HfFolder.get_token() if organization is None: lowercase : int = whoami(SCREAMING_SNAKE_CASE__ )["""name"""] return f"{username}/{model_id}" else: return f"{organization}/{model_id}" def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Tuple: if not is_jinja_available(): raise ValueError( """Modelcard rendering is based on Jinja templates.""" """ Please make sure to have `jinja` installed before using `create_model_card`.""" """ To install it, please run `pip install Jinja2`.""" ) if hasattr(SCREAMING_SNAKE_CASE__ , """local_rank""" ) and args.local_rank not in [-1, 0]: return lowercase : str = args.hub_token if hasattr(SCREAMING_SNAKE_CASE__ , """hub_token""" ) else None lowercase : int = get_full_repo_name(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ ) lowercase : Dict = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language="""en""" , license="""apache-2.0""" , library_name="""diffusers""" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=SCREAMING_SNAKE_CASE__ , model_name=SCREAMING_SNAKE_CASE__ , repo_name=SCREAMING_SNAKE_CASE__ , dataset_name=args.dataset_name if hasattr(SCREAMING_SNAKE_CASE__ , """dataset_name""" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(SCREAMING_SNAKE_CASE__ , """gradient_accumulation_steps""" ) else None ) , adam_betaa=args.adam_betaa if hasattr(SCREAMING_SNAKE_CASE__ , """adam_beta1""" ) else None , adam_betaa=args.adam_betaa if hasattr(SCREAMING_SNAKE_CASE__ , """adam_beta2""" ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(SCREAMING_SNAKE_CASE__ , """adam_weight_decay""" ) else None , adam_epsilon=args.adam_epsilon if hasattr(SCREAMING_SNAKE_CASE__ , """adam_epsilon""" ) else None , lr_scheduler=args.lr_scheduler if hasattr(SCREAMING_SNAKE_CASE__ , """lr_scheduler""" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(SCREAMING_SNAKE_CASE__ , """lr_warmup_steps""" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(SCREAMING_SNAKE_CASE__ , """ema_inv_gamma""" ) else None , ema_power=args.ema_power if hasattr(SCREAMING_SNAKE_CASE__ , """ema_power""" ) else None , ema_max_decay=args.ema_max_decay if hasattr(SCREAMING_SNAKE_CASE__ , """ema_max_decay""" ) else None , mixed_precision=args.mixed_precision , ) lowercase : str = os.path.join(args.output_dir , """README.md""" ) model_card.save(SCREAMING_SNAKE_CASE__ ) def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> Optional[Any]: if resolved_file is None or commit_hash is not None: return commit_hash lowercase : List[Any] = str(Path(SCREAMING_SNAKE_CASE__ ).as_posix() ) lowercase : Any = re.search(R"""snapshots/([^/]+)/""" , SCREAMING_SNAKE_CASE__ ) if search is None: return None lowercase : List[Any] = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(SCREAMING_SNAKE_CASE__ ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. lowercase : Optional[Any] = os.path.expanduser( os.getenv("""HF_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """huggingface""")) ) lowercase : Optional[int] = os.path.join(hf_cache_home, """diffusers""") def _snake_case( SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None ) -> None: if new_cache_dir is None: lowercase : Union[str, Any] = DIFFUSERS_CACHE if old_cache_dir is None: lowercase : List[str] = old_diffusers_cache lowercase : Dict = Path(SCREAMING_SNAKE_CASE__ ).expanduser() lowercase : int = Path(SCREAMING_SNAKE_CASE__ ).expanduser() for old_blob_path in old_cache_dir.glob("""**/blobs/*""" ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): lowercase : Any = new_cache_dir / old_blob_path.relative_to(SCREAMING_SNAKE_CASE__ ) new_blob_path.parent.mkdir(parents=SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) os.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) try: os.symlink(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) except OSError: logger.warning( """Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.""" ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). lowercase : Dict = os.path.join(DIFFUSERS_CACHE, """version_diffusers_cache.txt""") if not os.path.isfile(cache_version_file): lowercase : Any = 0 else: with open(cache_version_file) as f: try: lowercase : List[Any] = int(f.read()) except ValueError: lowercase : int = 0 if cache_version < 1: lowercase : Union[str, Any] = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( """The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """ """existing cached models. This is a one-time operation, you can interrupt it or run it """ """later by calling `diffusers.utils.hub_utils.move_cache()`.""" ) try: move_cache() except Exception as e: lowercase : int = """\n""".join(traceback.format_tb(e.__traceback__)) logger.error( F'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease ''' """file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """ """message and we will do our best to help.""" ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, """w""") as f: f.write("""1""") except Exception: logger.warning( F'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure ''' """the directory exists and can be written to.""" ) def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> str: if variant is not None: lowercase : List[str] = weights_name.split(""".""" ) lowercase : Optional[Any] = splits[:-1] + [variant] + splits[-1:] lowercase : int = """.""".join(SCREAMING_SNAKE_CASE__ ) return weights_name def _snake_case( SCREAMING_SNAKE_CASE__ , *, SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , ) -> Optional[Any]: lowercase : Optional[int] = str(SCREAMING_SNAKE_CASE__ ) if os.path.isfile(SCREAMING_SNAKE_CASE__ ): return pretrained_model_name_or_path elif os.path.isdir(SCREAMING_SNAKE_CASE__ ): if os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ): # Load from a PyTorch checkpoint lowercase : List[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ): lowercase : Any = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return model_file else: raise EnvironmentError( f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}." ) else: # 1. First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(SCREAMING_SNAKE_CASE__ ).base_version ) >= version.parse("""0.20.0""" ) ): try: lowercase : Any = hf_hub_download( SCREAMING_SNAKE_CASE__ , filename=_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , cache_dir=SCREAMING_SNAKE_CASE__ , force_download=SCREAMING_SNAKE_CASE__ , proxies=SCREAMING_SNAKE_CASE__ , resume_download=SCREAMING_SNAKE_CASE__ , local_files_only=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , user_agent=SCREAMING_SNAKE_CASE__ , subfolder=SCREAMING_SNAKE_CASE__ , revision=revision or commit_hash , ) warnings.warn( f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead." , SCREAMING_SNAKE_CASE__ , ) return model_file except: # noqa: E722 warnings.warn( f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}' so that the correct variant file can be added." , SCREAMING_SNAKE_CASE__ , ) try: # 2. Load model file as usual lowercase : int = hf_hub_download( SCREAMING_SNAKE_CASE__ , filename=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , force_download=SCREAMING_SNAKE_CASE__ , proxies=SCREAMING_SNAKE_CASE__ , resume_download=SCREAMING_SNAKE_CASE__ , local_files_only=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , user_agent=SCREAMING_SNAKE_CASE__ , subfolder=SCREAMING_SNAKE_CASE__ , revision=revision or commit_hash , ) return model_file except RepositoryNotFoundError: raise EnvironmentError( f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier " """listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a """ """token having permission to this repo with `use_auth_token` or log in with `huggingface-cli """ """login`.""" ) except RevisionNotFoundError: raise EnvironmentError( f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for " """this model name. Check the model page at """ f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." ) except EntryNotFoundError: raise EnvironmentError( f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." ) except HTTPError as err: raise EnvironmentError( f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" ) except ValueError: raise EnvironmentError( f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it" f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a" f" directory containing a file named {weights_name} or" """ \nCheckout your internet connection or see how to run the library in""" """ offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'.""" ) except EnvironmentError: raise EnvironmentError( f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from " """'https://huggingface.co/models', make sure you don't have a local directory with the same name. """ f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory " f"containing a file named {weights_name}" )
285
1
import gc import unittest from transformers import CTRLConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, ) class _a : '''simple docstring''' def __init__( self , A__ , A__=14 , A__=7 , A__=True , A__=True , A__=True , A__=True , A__=True , A__=99 , A__=32 , A__=5 , A__=4 , A__=37 , A__="gelu" , A__=0.1 , A__=0.1 , A__=512 , A__=16 , A__=2 , A__=0.0_2 , A__=3 , A__=4 , A__=None , ): A__ : List[str] = parent A__ : List[str] = batch_size A__ : str = seq_length A__ : Optional[int] = is_training A__ : Optional[int] = use_token_type_ids A__ : List[Any] = use_input_mask A__ : Optional[Any] = use_labels A__ : Optional[int] = use_mc_token_ids A__ : Any = vocab_size A__ : int = hidden_size A__ : Dict = num_hidden_layers A__ : List[Any] = num_attention_heads A__ : str = intermediate_size A__ : Union[str, Any] = hidden_act A__ : Optional[Any] = hidden_dropout_prob A__ : int = attention_probs_dropout_prob A__ : Dict = max_position_embeddings A__ : List[str] = type_vocab_size A__ : int = type_sequence_label_size A__ : List[Any] = initializer_range A__ : int = num_labels A__ : Union[str, Any] = num_choices A__ : Optional[int] = scope A__ : str = self.vocab_size - 1 def __A ( self ): A__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ : List[Any] = None if self.use_input_mask: A__ : str = random_attention_mask([self.batch_size, self.seq_length] ) A__ : int = None if self.use_token_type_ids: A__ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A__ : str = None if self.use_mc_token_ids: A__ : Dict = ids_tensor([self.batch_size, self.num_choices] , self.seq_length ) A__ : List[Any] = None A__ : Union[str, Any] = None A__ : Union[str, Any] = None if self.use_labels: A__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A__ : int = ids_tensor([self.batch_size] , self.num_choices ) A__ : List[str] = self.get_config() A__ : Dict = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def __A ( self ): return CTRLConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) def __A ( self , A__ , A__ , A__ , A__ , A__ , *A__ ): A__ : Dict = CTRLModel(config=A__ ) model.to(A__ ) model.eval() model(A__ , token_type_ids=A__ , head_mask=A__ ) model(A__ , token_type_ids=A__ ) A__ : Optional[Any] = model(A__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(len(result.past_key_values ) , config.n_layer ) def __A ( self , A__ , A__ , A__ , A__ , A__ , *A__ ): A__ : List[Any] = CTRLLMHeadModel(A__ ) model.to(A__ ) model.eval() A__ : Optional[int] = model(A__ , token_type_ids=A__ , labels=A__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self ): A__ : Optional[Any] = self.prepare_config_and_inputs() ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) : str = config_and_inputs A__ : Optional[int] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask} return config, inputs_dict def __A ( self , A__ , A__ , A__ , A__ , *A__ ): A__ : Tuple = self.num_labels A__ : Union[str, Any] = CTRLForSequenceClassification(A__ ) model.to(A__ ) model.eval() A__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A__ : Union[str, Any] = model(A__ , token_type_ids=A__ , labels=A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) @require_torch class _a (A__ , A__ , A__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__: List[str] = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else () UpperCAmelCase__: List[Any] = (CTRLLMHeadModel,) if is_torch_available() else () UpperCAmelCase__: int = ( { '''feature-extraction''': CTRLModel, '''text-classification''': CTRLForSequenceClassification, '''text-generation''': CTRLLMHeadModel, '''zero-shot''': CTRLForSequenceClassification, } if is_torch_available() else {} ) UpperCAmelCase__: Optional[Any] = True UpperCAmelCase__: Any = False UpperCAmelCase__: str = False def __A ( self , A__ , A__ , A__ , A__ , A__ ): if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny # config could not be created. return True return False def __A ( self ): A__ : Optional[int] = CTRLModelTester(self ) A__ : Optional[int] = ConfigTester(self , config_class=A__ , n_embd=37 ) def __A ( self ): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() def __A ( self ): self.config_tester.run_common_tests() def __A ( self ): A__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_ctrl_model(*A__ ) def __A ( self ): A__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*A__ ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def __A ( self ): pass @slow def __A ( self ): for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ : Optional[int] = CTRLModel.from_pretrained(A__ ) self.assertIsNotNone(A__ ) @unittest.skip("""The model doesn\'t support left padding""" ) # and it's not used enough to be worth fixing :) def __A ( self ): pass @require_torch class _a (unittest.TestCase ): '''simple docstring''' def __A ( self ): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() @slow def __A ( self ): A__ : Union[str, Any] = CTRLLMHeadModel.from_pretrained("""ctrl""" ) model.to(A__ ) A__ : Union[str, Any] = torch.tensor( [[1_1859, 0, 1611, 8]] , dtype=torch.long , device=A__ ) # Legal the president is A__ : int = [ 1_1859, 0, 1611, 8, 5, 150, 2_6449, 2, 19, 348, 469, 3, 2595, 48, 2_0740, 24_6533, 24_6533, 19, 30, 5, ] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a A__ : Union[str, Any] = model.generate(A__ , do_sample=A__ ) self.assertListEqual(output_ids[0].tolist() , A__ )
192
import argparse import torch from huggingface_hub import hf_hub_download from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM from transformers.utils import logging logging.set_verbosity_info() a_ : str = logging.get_logger(__name__) def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = RobertaPreLayerNormConfig.from_pretrained( _UpperCAmelCase , architectures=['RobertaPreLayerNormForMaskedLM']) # convert state_dict SCREAMING_SNAKE_CASE = torch.load(hf_hub_download(repo_id=_UpperCAmelCase , filename='pytorch_model.bin')) SCREAMING_SNAKE_CASE = {} for tensor_key, tensor_value in original_state_dict.items(): # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta' if tensor_key.startswith('roberta.'): SCREAMING_SNAKE_CASE = 'roberta_prelayernorm.' + tensor_key[len('roberta.') :] # The original implementation contains weights which are not used, remove them from the state_dict if tensor_key.endswith('.self.LayerNorm.weight') or tensor_key.endswith('.self.LayerNorm.bias'): continue SCREAMING_SNAKE_CASE = tensor_value SCREAMING_SNAKE_CASE = RobertaPreLayerNormForMaskedLM.from_pretrained( pretrained_model_name_or_path=_UpperCAmelCase , config=_UpperCAmelCase , state_dict=_UpperCAmelCase) model.save_pretrained(_UpperCAmelCase) # convert tokenizer SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(_UpperCAmelCase) tokenizer.save_pretrained(_UpperCAmelCase) if __name__ == "__main__": a_ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint-repo', default=None, type=str, required=True, help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) a_ : int = parser.parse_args() convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
137
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _a = { '''configuration_jukebox''': [ '''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''JukeboxConfig''', '''JukeboxPriorConfig''', '''JukeboxVQVAEConfig''', ], '''tokenization_jukebox''': ['''JukeboxTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ '''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''', '''JukeboxModel''', '''JukeboxPreTrainedModel''', '''JukeboxVQVAE''', '''JukeboxPrior''', ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys _a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
368
"""simple docstring""" from typing import Dict, List, Optional, Tuple, Union import torch from ...models import AutoencoderKL, TransformeraDModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class _UpperCAmelCase( lowerCamelCase ): def __init__( self , __a , __a , __a , __a = None , ) -> List[str]: '''simple docstring''' super().__init__() self.register_modules(transformer=__a , vae=__a , scheduler=__a) # create a imagenet -> id dictionary for easier use _UpperCamelCase = {} if idalabel is not None: for key, value in idalabel.items(): for label in value.split(''','''): _UpperCamelCase = int(__a) _UpperCamelCase = dict(sorted(self.labels.items())) def UpperCAmelCase ( self , __a) -> List[int]: '''simple docstring''' if not isinstance(__a , __a): _UpperCamelCase = list(__a) for l in label: if l not in self.labels: raise ValueError( F'''{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.''') return [self.labels[l] for l in label] @torch.no_grad() def __call__( self , __a , __a = 4.0 , __a = None , __a = 50 , __a = "pil" , __a = True , ) -> Union[ImagePipelineOutput, Tuple]: '''simple docstring''' _UpperCamelCase = len(__a) _UpperCamelCase = self.transformer.config.sample_size _UpperCamelCase = self.transformer.config.in_channels _UpperCamelCase = randn_tensor( shape=(batch_size, latent_channels, latent_size, latent_size) , generator=__a , device=self.device , dtype=self.transformer.dtype , ) _UpperCamelCase = torch.cat([latents] * 2) if guidance_scale > 1 else latents _UpperCamelCase = torch.tensor(__a , device=self.device).reshape(-1) _UpperCamelCase = torch.tensor([10_00] * batch_size , device=self.device) _UpperCamelCase = torch.cat([class_labels, class_null] , 0) if guidance_scale > 1 else class_labels # set step values self.scheduler.set_timesteps(__a) for t in self.progress_bar(self.scheduler.timesteps): if guidance_scale > 1: _UpperCamelCase = latent_model_input[: len(__a) // 2] _UpperCamelCase = torch.cat([half, half] , dim=0) _UpperCamelCase = self.scheduler.scale_model_input(__a , __a) _UpperCamelCase = t if not torch.is_tensor(__a): # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) _UpperCamelCase = latent_model_input.device.type == '''mps''' if isinstance(__a , __a): _UpperCamelCase = torch.floataa if is_mps else torch.floataa else: _UpperCamelCase = torch.intaa if is_mps else torch.intaa _UpperCamelCase = torch.tensor([timesteps] , dtype=__a , device=latent_model_input.device) elif len(timesteps.shape) == 0: _UpperCamelCase = timesteps[None].to(latent_model_input.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML _UpperCamelCase = timesteps.expand(latent_model_input.shape[0]) # predict noise model_output _UpperCamelCase = self.transformer( __a , timestep=__a , class_labels=__a).sample # perform guidance if guidance_scale > 1: _UpperCamelCase , _UpperCamelCase = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:] _UpperCamelCase , _UpperCamelCase = torch.split(__a , len(__a) // 2 , dim=0) _UpperCamelCase = uncond_eps + guidance_scale * (cond_eps - uncond_eps) _UpperCamelCase = torch.cat([half_eps, half_eps] , dim=0) _UpperCamelCase = torch.cat([eps, rest] , dim=1) # learned sigma if self.transformer.config.out_channels // 2 == latent_channels: _UpperCamelCase , _UpperCamelCase = torch.split(__a , __a , dim=1) else: _UpperCamelCase = noise_pred # compute previous image: x_t -> x_t-1 _UpperCamelCase = self.scheduler.step(__a , __a , __a).prev_sample if guidance_scale > 1: _UpperCamelCase , _UpperCamelCase = latent_model_input.chunk(2 , dim=0) else: _UpperCamelCase = latent_model_input _UpperCamelCase = 1 / self.vae.config.scaling_factor * latents _UpperCamelCase = self.vae.decode(__a).sample _UpperCamelCase = (samples / 2 + 0.5).clamp(0 , 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 _UpperCamelCase = samples.cpu().permute(0 , 2 , 3 , 1).float().numpy() if output_type == "pil": _UpperCamelCase = self.numpy_to_pil(__a) if not return_dict: return (samples,) return ImagePipelineOutput(images=__a)
100
0
'''simple docstring''' from collections import deque from math import floor from random import random from time import time class _lowerCAmelCase : def __init__(self ): A_ : int = {} def _a (self , lowercase , lowercase , lowercase=1 ): if self.graph.get(lowercase ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: A_ : Tuple = [[w, v]] if not self.graph.get(lowercase ): A_ : Union[str, Any] = [] def _a (self ): return list(self.graph ) def _a (self , lowercase , lowercase ): if self.graph.get(lowercase ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(lowercase ) def _a (self , lowercase=-2 , lowercase=-1 ): if s == d: return [] A_ : Union[str, Any] = [] A_ : Dict = [] if s == -2: A_ : List[Any] = list(self.graph )[0] stack.append(lowercase ) visited.append(lowercase ) A_ : Any = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A_ : Tuple = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(lowercase ) return visited else: stack.append(node[1] ) visited.append(node[1] ) A_ : List[Any] = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(lowercase ) != 0: A_ : List[str] = stack[len(lowercase ) - 1] else: A_ : str = ss # check if se have reached the starting point if len(lowercase ) == 0: return visited def _a (self , lowercase=-1 ): if c == -1: A_ : Tuple = floor(random() * 10000 ) + 10 for i in range(lowercase ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): A_ : List[str] = floor(random() * c ) + 1 if n != i: self.add_pair(lowercase , lowercase , 1 ) def _a (self , lowercase=-2 ): A_ : Union[str, Any] = deque() A_ : Tuple = [] if s == -2: A_ : int = list(self.graph )[0] d.append(lowercase ) visited.append(lowercase ) while d: A_ : Any = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def _a (self , lowercase ): A_ : Dict = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def _a (self , lowercase ): return len(self.graph[u] ) def _a (self , lowercase=-2 ): A_ : Dict = [] A_ : Optional[Any] = [] if s == -2: A_ : int = list(self.graph )[0] stack.append(lowercase ) visited.append(lowercase ) A_ : Optional[Any] = s A_ : int = [] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A_ : Optional[int] = s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A_ : Optional[Any] = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(lowercase ) != 0: A_ : int = stack[len(lowercase ) - 1] else: A_ : Optional[Any] = ss # check if se have reached the starting point if len(lowercase ) == 0: return sorted_nodes def _a (self ): A_ : Dict = [] A_ : Tuple = [] A_ : Any = list(self.graph )[0] stack.append(lowercase ) visited.append(lowercase ) A_ : Optional[int] = -2 A_ : List[Any] = [] A_ : List[str] = s A_ : Optional[int] = False A_ : Union[str, Any] = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A_ : List[Any] = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A_ : Dict = len(lowercase ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A_ : List[str] = node[1] break # check if all the children are visited if s == ss: stack.pop() A_ : str = True if len(lowercase ) != 0: A_ : Union[str, Any] = stack[len(lowercase ) - 1] else: A_ : Tuple = False indirect_parents.append(lowercase ) A_ : Tuple = s A_ : Tuple = ss # check if se have reached the starting point if len(lowercase ) == 0: return list(lowercase ) def _a (self ): A_ : Union[str, Any] = [] A_ : str = [] A_ : List[Any] = list(self.graph )[0] stack.append(lowercase ) visited.append(lowercase ) A_ : List[Any] = -2 A_ : Tuple = [] A_ : Optional[Any] = s A_ : Union[str, Any] = False A_ : Optional[Any] = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A_ : int = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A_ : Dict = len(lowercase ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A_ : Optional[Any] = node[1] break # check if all the children are visited if s == ss: stack.pop() A_ : List[str] = True if len(lowercase ) != 0: A_ : Dict = stack[len(lowercase ) - 1] else: A_ : int = False indirect_parents.append(lowercase ) A_ : List[Any] = s A_ : int = ss # check if se have reached the starting point if len(lowercase ) == 0: return False def _a (self , lowercase=-2 , lowercase=-1 ): A_ : str = time() self.dfs(lowercase , lowercase ) A_ : Any = time() return end - begin def _a (self , lowercase=-2 ): A_ : Union[str, Any] = time() self.bfs(lowercase ) A_ : Optional[Any] = time() return end - begin class _lowerCAmelCase : def __init__(self ): A_ : List[str] = {} def _a (self , lowercase , lowercase , lowercase=1 ): # check if the u exists if self.graph.get(lowercase ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist A_ : int = [[w, v]] # add the other way if self.graph.get(lowercase ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist A_ : Any = [[w, u]] def _a (self , lowercase , lowercase ): if self.graph.get(lowercase ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(lowercase ) # the other way round if self.graph.get(lowercase ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(lowercase ) def _a (self , lowercase=-2 , lowercase=-1 ): if s == d: return [] A_ : Dict = [] A_ : List[Any] = [] if s == -2: A_ : str = list(self.graph )[0] stack.append(lowercase ) visited.append(lowercase ) A_ : List[str] = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A_ : Union[str, Any] = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(lowercase ) return visited else: stack.append(node[1] ) visited.append(node[1] ) A_ : Optional[Any] = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(lowercase ) != 0: A_ : Dict = stack[len(lowercase ) - 1] else: A_ : List[str] = ss # check if se have reached the starting point if len(lowercase ) == 0: return visited def _a (self , lowercase=-1 ): if c == -1: A_ : Union[str, Any] = floor(random() * 10000 ) + 10 for i in range(lowercase ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): A_ : Union[str, Any] = floor(random() * c ) + 1 if n != i: self.add_pair(lowercase , lowercase , 1 ) def _a (self , lowercase=-2 ): A_ : int = deque() A_ : Optional[Any] = [] if s == -2: A_ : Optional[int] = list(self.graph )[0] d.append(lowercase ) visited.append(lowercase ) while d: A_ : Union[str, Any] = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def _a (self , lowercase ): return len(self.graph[u] ) def _a (self ): A_ : Optional[int] = [] A_ : Dict = [] A_ : Union[str, Any] = list(self.graph )[0] stack.append(lowercase ) visited.append(lowercase ) A_ : List[Any] = -2 A_ : List[str] = [] A_ : int = s A_ : Optional[int] = False A_ : Any = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A_ : Tuple = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A_ : int = len(lowercase ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A_ : Tuple = node[1] break # check if all the children are visited if s == ss: stack.pop() A_ : str = True if len(lowercase ) != 0: A_ : Any = stack[len(lowercase ) - 1] else: A_ : str = False indirect_parents.append(lowercase ) A_ : Dict = s A_ : List[Any] = ss # check if se have reached the starting point if len(lowercase ) == 0: return list(lowercase ) def _a (self ): A_ : Optional[int] = [] A_ : Optional[int] = [] A_ : List[str] = list(self.graph )[0] stack.append(lowercase ) visited.append(lowercase ) A_ : Any = -2 A_ : Any = [] A_ : Tuple = s A_ : str = False A_ : str = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A_ : Dict = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A_ : Any = len(lowercase ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A_ : Optional[Any] = node[1] break # check if all the children are visited if s == ss: stack.pop() A_ : Union[str, Any] = True if len(lowercase ) != 0: A_ : List[Any] = stack[len(lowercase ) - 1] else: A_ : int = False indirect_parents.append(lowercase ) A_ : int = s A_ : int = ss # check if se have reached the starting point if len(lowercase ) == 0: return False def _a (self ): return list(self.graph ) def _a (self , lowercase=-2 , lowercase=-1 ): A_ : Any = time() self.dfs(lowercase , lowercase ) A_ : Optional[Any] = time() return end - begin def _a (self , lowercase=-2 ): A_ : List[Any] = time() self.bfs(lowercase ) A_ : List[Any] = time() return end - begin
206
'''simple docstring''' import pickle import numpy as np from matplotlib import pyplot as plt class _lowerCAmelCase : def __init__(self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=0.2 , lowercase=0.2 ): A_ : str = bp_numa A_ : Optional[int] = bp_numa A_ : Optional[Any] = bp_numa A_ : str = conva_get[:2] A_ : Union[str, Any] = conva_get[2] A_ : Union[str, Any] = size_pa A_ : List[str] = rate_w A_ : Dict = rate_t A_ : Dict = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] A_ : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) A_ : Tuple = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) A_ : List[Any] = -2 * np.random.rand(self.conva[1] ) + 1 A_ : Dict = -2 * np.random.rand(self.num_bpa ) + 1 A_ : Any = -2 * np.random.rand(self.num_bpa ) + 1 def _a (self , lowercase ): # save model dict with pickle A_ : Union[str, Any] = { """num_bp1""": self.num_bpa, """num_bp2""": self.num_bpa, """num_bp3""": self.num_bpa, """conv1""": self.conva, """step_conv1""": self.step_conva, """size_pooling1""": self.size_poolinga, """rate_weight""": self.rate_weight, """rate_thre""": self.rate_thre, """w_conv1""": self.w_conva, """wkj""": self.wkj, """vji""": self.vji, """thre_conv1""": self.thre_conva, """thre_bp2""": self.thre_bpa, """thre_bp3""": self.thre_bpa, } with open(lowercase , """wb""" ) as f: pickle.dump(lowercase , lowercase ) print(F'Model saved: {save_path}' ) @classmethod def _a (cls , lowercase ): # read saved model with open(lowercase , """rb""" ) as f: A_ : Optional[int] = pickle.load(lowercase ) # noqa: S301 A_ : Optional[int] = model_dic.get("""conv1""" ) conv_get.append(model_dic.get("""step_conv1""" ) ) A_ : Tuple = model_dic.get("""size_pooling1""" ) A_ : Optional[Any] = model_dic.get("""num_bp1""" ) A_ : List[str] = model_dic.get("""num_bp2""" ) A_ : Dict = model_dic.get("""num_bp3""" ) A_ : Tuple = model_dic.get("""rate_weight""" ) A_ : List[Any] = model_dic.get("""rate_thre""" ) # create model instance A_ : List[str] = CNN(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) # modify model parameter A_ : int = model_dic.get("""w_conv1""" ) A_ : str = model_dic.get("""wkj""" ) A_ : str = model_dic.get("""vji""" ) A_ : int = model_dic.get("""thre_conv1""" ) A_ : Union[str, Any] = model_dic.get("""thre_bp2""" ) A_ : List[Any] = model_dic.get("""thre_bp3""" ) return conv_ins def _a (self , lowercase ): return 1 / (1 + np.exp(-1 * x )) def _a (self , lowercase ): return round(lowercase , 3 ) def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase ): # convolution process A_ : Dict = convs[0] A_ : Any = convs[1] A_ : Tuple = np.shape(lowercase )[0] # get the data slice of original image data, data_focus A_ : List[str] = [] for i_focus in range(0 , size_data - size_conv + 1 , lowercase ): for j_focus in range(0 , size_data - size_conv + 1 , lowercase ): A_ : List[Any] = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(lowercase ) # calculate the feature map of every single kernel, and saved as list of matrix A_ : int = [] A_ : List[str] = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(lowercase ): A_ : List[Any] = [] for i_focus in range(len(lowercase ) ): A_ : List[str] = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(lowercase ) ) A_ : Tuple = np.asmatrix(lowercase ).reshape( lowercase , lowercase ) data_featuremap.append(lowercase ) # expanding the data slice to One dimenssion A_ : Optional[int] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(lowercase ) ) A_ : Dict = np.asarray(lowercase ) return focus_list, data_featuremap def _a (self , lowercase , lowercase , lowercase="average_pool" ): # pooling process A_ : Union[str, Any] = len(featuremaps[0] ) A_ : str = int(size_map / size_pooling ) A_ : List[str] = [] for i_map in range(len(lowercase ) ): A_ : Any = featuremaps[i_map] A_ : Any = [] for i_focus in range(0 , lowercase , lowercase ): for j_focus in range(0 , lowercase , lowercase ): A_ : Tuple = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(lowercase ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(lowercase ) ) A_ : List[str] = np.asmatrix(lowercase ).reshape(lowercase , lowercase ) featuremap_pooled.append(lowercase ) return featuremap_pooled def _a (self , lowercase ): # expanding three dimension data to one dimension list A_ : List[Any] = [] for i in range(len(lowercase ) ): A_ : Tuple = np.shape(data[i] ) A_ : str = data[i].reshape(1 , shapes[0] * shapes[1] ) A_ : Tuple = data_listed.getA().tolist()[0] data_expanded.extend(lowercase ) A_ : Optional[Any] = np.asarray(lowercase ) return data_expanded def _a (self , lowercase ): # expanding matrix to one dimension list A_ : str = np.asarray(lowercase ) A_ : Any = np.shape(lowercase ) A_ : int = data_mat.reshape(1 , shapes[0] * shapes[1] ) return data_expanded def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase ): A_ : List[str] = [] A_ : Union[str, Any] = 0 for i_map in range(lowercase ): A_ : Union[str, Any] = np.ones((size_map, size_map) ) for i in range(0 , lowercase , lowercase ): for j in range(0 , lowercase , lowercase ): A_ : str = pd_pool[ i_pool ] A_ : Optional[Any] = i_pool + 1 A_ : Union[str, Any] = np.multiply( lowercase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) ) pd_all.append(lowercase ) return pd_all def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=bool ): # model traning print("""----------------------Start Training-------------------------""" ) print((""" - - Shape: Train_Data """, np.shape(lowercase )) ) print((""" - - Shape: Teach_Data """, np.shape(lowercase )) ) A_ : Optional[Any] = 0 A_ : Dict = [] A_ : List[Any] = 10000 while rp < n_repeat and mse >= error_accuracy: A_ : List[Any] = 0 print(F'-------------Learning Time {rp}--------------' ) for p in range(len(lowercase ) ): # print('------------Learning Image: %d--------------'%p) A_ : Optional[Any] = np.asmatrix(datas_train[p] ) A_ : str = np.asarray(datas_teach[p] ) A_, A_ : Dict = self.convolute( lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) A_ : Any = self.pooling(lowercase , self.size_poolinga ) A_ : int = np.shape(lowercase ) A_ : Union[str, Any] = self._expand(lowercase ) A_ : Dict = data_bp_input A_ : int = np.dot(lowercase , self.vji.T ) - self.thre_bpa A_ : Any = self.sig(lowercase ) A_ : Optional[int] = np.dot(lowercase , self.wkj.T ) - self.thre_bpa A_ : List[str] = self.sig(lowercase ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- A_ : Optional[Any] = np.multiply( (data_teach - bp_outa) , np.multiply(lowercase , (1 - bp_outa) ) ) A_ : Tuple = np.multiply( np.dot(lowercase , self.wkj ) , np.multiply(lowercase , (1 - bp_outa) ) ) A_ : Union[str, Any] = np.dot(lowercase , self.vji ) A_ : int = pd_i_all / (self.size_poolinga * self.size_poolinga) A_ : str = pd_conva_pooled.T.getA().tolist() A_ : List[Any] = self._calculate_gradient_from_pool( lowercase , lowercase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): A_ : int = self._expand_mat(pd_conva_all[k_conv] ) A_ : Any = self.rate_weight * np.dot(lowercase , lowercase ) A_ : Union[str, Any] = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) A_ : Tuple = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer A_ : Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight A_ : List[str] = self.vji + pd_j_all.T * bp_outa * self.rate_weight A_ : List[Any] = self.thre_bpa - pd_k_all * self.rate_thre A_ : Optional[Any] = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image A_ : Optional[int] = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) A_ : List[Any] = rp + 1 A_ : Union[str, Any] = error_count / patterns all_mse.append(lowercase ) def draw_error(): A_ : str = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(lowercase , """+-""" ) plt.plot(lowercase , """r--""" ) plt.xlabel("""Learning Times""" ) plt.ylabel("""All_mse""" ) plt.grid(lowercase , alpha=0.5 ) plt.show() print("""------------------Training Complished---------------------""" ) print((""" - - Training epoch: """, rp, F' - - Mse: {mse:.6f}') ) if draw_e: draw_error() return mse def _a (self , lowercase ): # model predict A_ : Tuple = [] print("""-------------------Start Testing-------------------------""" ) print((""" - - Shape: Test_Data """, np.shape(lowercase )) ) for p in range(len(lowercase ) ): A_ : int = np.asmatrix(datas_test[p] ) A_, A_ : str = self.convolute( lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) A_ : Dict = self.pooling(lowercase , self.size_poolinga ) A_ : Tuple = self._expand(lowercase ) A_ : int = data_bp_input A_ : Union[str, Any] = bp_outa * self.vji.T - self.thre_bpa A_ : List[str] = self.sig(lowercase ) A_ : Optional[Any] = bp_outa * self.wkj.T - self.thre_bpa A_ : List[Any] = self.sig(lowercase ) produce_out.extend(bp_outa.getA().tolist() ) A_ : Any = [list(map(self.do_round , lowercase ) ) for each in produce_out] return np.asarray(lowercase ) def _a (self , lowercase ): # return the data of image after convoluting process so we can check it out A_ : Optional[Any] = np.asmatrix(lowercase ) A_, A_ : Optional[Any] = self.convolute( lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) A_ : Union[str, Any] = self.pooling(lowercase , self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
206
1
'''simple docstring''' import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.esm.modeling_esmfold import EsmForProteinFolding class lowercase_ : """simple docstring""" def __init__( self : Dict ,lowercase__ : int ,lowercase__ : Union[str, Any]=1_3 ,lowercase__ : Tuple=7 ,lowercase__ : Tuple=False ,lowercase__ : Tuple=True ,lowercase__ : Union[str, Any]=False ,lowercase__ : str=False ,lowercase__ : Tuple=1_9 ,lowercase__ : Optional[int]=3_2 ,lowercase__ : List[str]=5 ,lowercase__ : Optional[int]=4 ,lowercase__ : str=3_7 ,lowercase__ : int="gelu" ,lowercase__ : str=0.1 ,lowercase__ : Dict=0.1 ,lowercase__ : Optional[int]=5_1_2 ,lowercase__ : int=1_6 ,lowercase__ : int=2 ,lowercase__ : Dict=0.0_2 ,lowercase__ : str=3 ,lowercase__ : int=4 ,lowercase__ : Any=None ,): __lowercase = parent __lowercase = batch_size __lowercase = seq_length __lowercase = is_training __lowercase = use_input_mask __lowercase = use_token_type_ids __lowercase = use_labels __lowercase = vocab_size __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = intermediate_size __lowercase = hidden_act __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = max_position_embeddings __lowercase = type_vocab_size __lowercase = type_sequence_label_size __lowercase = initializer_range __lowercase = num_labels __lowercase = num_choices __lowercase = scope def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): __lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) __lowercase = None if self.use_input_mask: __lowercase = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase = None __lowercase = None __lowercase = None if self.use_labels: __lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) __lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) __lowercase = ids_tensor([self.batch_size] ,self.num_choices ) __lowercase = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): __lowercase = EsmConfig( vocab_size=3_3 ,hidden_size=self.hidden_size ,pad_token_id=1 ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,is_folding_model=lowercase__ ,esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False} ,) return config def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : str ,lowercase__ : Optional[Any] ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : Any ,lowercase__ : Tuple ): __lowercase = EsmForProteinFolding(config=lowercase__ ).float() model.to(lowercase__ ) model.eval() __lowercase = model(lowercase__ ,attention_mask=lowercase__ ) __lowercase = model(lowercase__ ) __lowercase = model(lowercase__ ) self.parent.assertEqual(result.positions.shape ,(8, self.batch_size, self.seq_length, 1_4, 3) ) self.parent.assertEqual(result.angles.shape ,(8, self.batch_size, self.seq_length, 7, 2) ) def SCREAMING_SNAKE_CASE ( self : str ): __lowercase = self.prepare_config_and_inputs() ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = config_and_inputs __lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE : int = False SCREAMING_SNAKE_CASE : Tuple = (EsmForProteinFolding,) if is_torch_available() else () SCREAMING_SNAKE_CASE : str = () SCREAMING_SNAKE_CASE : Tuple = {} if is_torch_available() else {} SCREAMING_SNAKE_CASE : Union[str, Any] = False def SCREAMING_SNAKE_CASE ( self : Any ): __lowercase = EsmFoldModelTester(self ) __lowercase = ConfigTester(self ,config_class=lowercase__ ,hidden_size=3_7 ) def SCREAMING_SNAKE_CASE ( self : int ): self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : Any ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase__ ) @unittest.skip('''Does not support attention outputs''' ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): pass @unittest.skip def SCREAMING_SNAKE_CASE ( self : Tuple ): pass @unittest.skip('''Esm does not support embedding resizing''' ) def SCREAMING_SNAKE_CASE ( self : List[str] ): pass @unittest.skip('''Esm does not support embedding resizing''' ) def SCREAMING_SNAKE_CASE ( self : List[str] ): pass @unittest.skip('''ESMFold does not support passing input embeds!''' ) def SCREAMING_SNAKE_CASE ( self : Any ): pass @unittest.skip('''ESMFold does not support head pruning.''' ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): pass @unittest.skip('''ESMFold does not support head pruning.''' ) def SCREAMING_SNAKE_CASE ( self : Dict ): pass @unittest.skip('''ESMFold does not support head pruning.''' ) def SCREAMING_SNAKE_CASE ( self : List[str] ): pass @unittest.skip('''ESMFold does not support head pruning.''' ) def SCREAMING_SNAKE_CASE ( self : Any ): pass @unittest.skip('''ESMFold does not support head pruning.''' ) def SCREAMING_SNAKE_CASE ( self : str ): pass @unittest.skip('''ESMFold does not output hidden states in the normal way.''' ) def SCREAMING_SNAKE_CASE ( self : str ): pass @unittest.skip('''ESMfold does not output hidden states in the normal way.''' ) def SCREAMING_SNAKE_CASE ( self : List[str] ): pass @unittest.skip('''ESMFold only has one output format.''' ) def SCREAMING_SNAKE_CASE ( self : Any ): pass @unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): pass @unittest.skip('''ESMFold does not support input chunking.''' ) def SCREAMING_SNAKE_CASE ( self : Any ): pass @unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): pass @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' ) def SCREAMING_SNAKE_CASE ( self : Tuple ): pass @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): pass @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' ) def SCREAMING_SNAKE_CASE ( self : Dict ): pass @unittest.skip('''ESMFold doesn\'t support data parallel.''' ) def SCREAMING_SNAKE_CASE ( self : Any ): pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def SCREAMING_SNAKE_CASE ( self : Dict ): pass @require_torch class lowercase_ (lowerCamelCase__ ): """simple docstring""" @slow def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): __lowercase = EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''' ).float() model.eval() __lowercase = torch.tensor([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] ) __lowercase = model(lowercase__ )['''positions'''] __lowercase = torch.tensor([2.5_8_2_8, 0.7_9_9_3, -1_0.9_3_3_4] ,dtype=torch.floataa ) self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] ,lowercase__ ,atol=1e-4 ) )
359
'''simple docstring''' def _A ( A__ ): """simple docstring""" stooge(A__ , 0 , len(A__ ) - 1 ) return arr def _A ( A__ , A__ , A__ ): """simple docstring""" if i >= h: return # If first element is smaller than the last then swap them if arr[i] > arr[h]: __lowercase , __lowercase = arr[h], arr[i] # If there are more than 2 elements in the array if h - i + 1 > 2: __lowercase = (int)((h - i + 1) / 3 ) # Recursively sort first 2/3 elements stooge(A__ , A__ , (h - t) ) # Recursively sort last 2/3 elements stooge(A__ , i + t , (A__) ) # Recursively sort first 2/3 elements stooge(A__ , A__ , (h - t) ) if __name__ == "__main__": lowerCAmelCase__ = input('''Enter numbers separated by a comma:\n''').strip() lowerCAmelCase__ = [int(item) for item in user_input.split(''',''')] print(stooge_sort(unsorted))
52
0
from __future__ import annotations import bisect def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ = 0 , lowercase__ = -1 ): if hi < 0: __SCREAMING_SNAKE_CASE : Union[str, Any] = len(lowercase__ ) while lo < hi: __SCREAMING_SNAKE_CASE : Any = lo + (hi - lo) // 2 if sorted_collection[mid] < item: __SCREAMING_SNAKE_CASE : Union[str, Any] = mid + 1 else: __SCREAMING_SNAKE_CASE : Optional[Any] = mid return lo def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ = 0 , lowercase__ = -1 ): if hi < 0: __SCREAMING_SNAKE_CASE : List[Any] = len(lowercase__ ) while lo < hi: __SCREAMING_SNAKE_CASE : Optional[int] = lo + (hi - lo) // 2 if sorted_collection[mid] <= item: __SCREAMING_SNAKE_CASE : Any = mid + 1 else: __SCREAMING_SNAKE_CASE : Optional[int] = mid return lo def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ = 0 , lowercase__ = -1 ): sorted_collection.insert(bisect_left(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) , lowercase__ ) def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ = 0 , lowercase__ = -1 ): sorted_collection.insert(bisect_right(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) , lowercase__ ) def _UpperCamelCase ( lowercase__ , lowercase__ ): __SCREAMING_SNAKE_CASE : Any = 0 __SCREAMING_SNAKE_CASE : List[Any] = len(lowercase__ ) - 1 while left <= right: __SCREAMING_SNAKE_CASE : str = left + (right - left) // 2 __SCREAMING_SNAKE_CASE : List[str] = sorted_collection[midpoint] if current_item == item: return midpoint elif item < current_item: __SCREAMING_SNAKE_CASE : int = midpoint - 1 else: __SCREAMING_SNAKE_CASE : Dict = midpoint + 1 return None def _UpperCamelCase ( lowercase__ , lowercase__ ): __SCREAMING_SNAKE_CASE : Union[str, Any] = bisect.bisect_left(lowercase__ , lowercase__ ) if index != len(lowercase__ ) and sorted_collection[index] == item: return index return None def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ): if right < left: return None __SCREAMING_SNAKE_CASE : int = left + (right - left) // 2 if sorted_collection[midpoint] == item: return midpoint elif sorted_collection[midpoint] > item: return binary_search_by_recursion(lowercase__ , lowercase__ , lowercase__ , midpoint - 1 ) else: return binary_search_by_recursion(lowercase__ , lowercase__ , midpoint + 1 , lowercase__ ) if __name__ == "__main__": __lowerCAmelCase : Dict =input('Enter numbers separated by comma:\n').strip() __lowerCAmelCase : str =sorted(int(item) for item in user_input.split(',')) __lowerCAmelCase : Tuple =int(input('Enter a single number to be found in the list:\n')) __lowerCAmelCase : Tuple =binary_search(collection, target) if result is None: print(f"""{target} was not found in {collection}.""") else: print(f"""{target} was found at position {result} in {collection}.""")
9
'''simple docstring''' import json import os import tempfile import transformers import datasets from utils import generate_example_dataset, get_duration snake_case_ : Union[str, Any] = 50_00_00 snake_case_ ,snake_case_ : Optional[int] = os.path.split(__file__) snake_case_ : Optional[Any] = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json")) @get_duration def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : datasets.Dataset, **SCREAMING_SNAKE_CASE__ : Dict ) -> str: UpperCAmelCase_ : List[str] = dataset.map(**SCREAMING_SNAKE_CASE__ ) @get_duration def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : datasets.Dataset, **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Any: UpperCAmelCase_ : Optional[int] = dataset.filter(**SCREAMING_SNAKE_CASE__ ) def lowerCamelCase_ ( ) -> Any: UpperCAmelCase_ : List[str] = {'''num examples''': SPEED_TEST_N_EXAMPLES} with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ : Optional[int] = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} ) UpperCAmelCase_ : Dict = generate_example_dataset( os.path.join(SCREAMING_SNAKE_CASE__, '''dataset.arrow''' ), SCREAMING_SNAKE_CASE__, num_examples=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Any = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''', use_fast=SCREAMING_SNAKE_CASE__ ) def tokenize(SCREAMING_SNAKE_CASE__ : Union[str, Any] ): return tokenizer(examples['''text'''] ) UpperCAmelCase_ : List[str] = map(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Union[str, Any] = map(SCREAMING_SNAKE_CASE__, batched=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : str = map(SCREAMING_SNAKE_CASE__, function=lambda SCREAMING_SNAKE_CASE__ : None, batched=SCREAMING_SNAKE_CASE__ ) with dataset.formatted_as(type='''numpy''' ): UpperCAmelCase_ : Dict = map(SCREAMING_SNAKE_CASE__, function=lambda SCREAMING_SNAKE_CASE__ : None, batched=SCREAMING_SNAKE_CASE__ ) with dataset.formatted_as(type='''pandas''' ): UpperCAmelCase_ : Union[str, Any] = map(SCREAMING_SNAKE_CASE__, function=lambda SCREAMING_SNAKE_CASE__ : None, batched=SCREAMING_SNAKE_CASE__ ) with dataset.formatted_as(type='''torch''', columns='''numbers''' ): UpperCAmelCase_ : Optional[int] = map(SCREAMING_SNAKE_CASE__, function=lambda SCREAMING_SNAKE_CASE__ : None, batched=SCREAMING_SNAKE_CASE__ ) with dataset.formatted_as(type='''tensorflow''', columns='''numbers''' ): UpperCAmelCase_ : Optional[Any] = map(SCREAMING_SNAKE_CASE__, function=lambda SCREAMING_SNAKE_CASE__ : None, batched=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Any = map(SCREAMING_SNAKE_CASE__, function=SCREAMING_SNAKE_CASE__, batched=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Tuple = filter(SCREAMING_SNAKE_CASE__ ) # Activate later when tokenizer support batched inputs # with dataset.formatted_as(type='numpy'): # times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True) with open(SCREAMING_SNAKE_CASE__, '''wb''' ) as f: f.write(json.dumps(SCREAMING_SNAKE_CASE__ ).encode('''utf-8''' ) ) if __name__ == "__main__": # useful to run the profiler benchmark_map_filter()
125
0
"""simple docstring""" import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class a ( unittest.TestCase ): def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = inspect.getfile(accelerate.test_utils ) lowerCAmelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] ) lowerCAmelCase = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] ) lowerCAmelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] ) @require_multi_gpu def UpperCamelCase__ ( self ): """simple docstring""" print(F'Found {torch.cuda.device_count()} devices.' ) lowerCAmelCase = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(_snake_case , env=os.environ.copy() ) @require_multi_gpu def UpperCamelCase__ ( self ): """simple docstring""" print(F'Found {torch.cuda.device_count()} devices.' ) lowerCAmelCase = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', self.operation_file_path] print(F'Command: {cmd}' ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(_snake_case , env=os.environ.copy() ) @require_multi_gpu def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(_snake_case , env=os.environ.copy() ) @require_multi_gpu def UpperCamelCase__ ( self ): """simple docstring""" print(F'Found {torch.cuda.device_count()} devices, using 2 devices only' ) lowerCAmelCase = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices='0,1' ): execute_subprocess_async(_snake_case , env=os.environ.copy() ) if __name__ == "__main__": __UpperCamelCase : Optional[Any] = Accelerator() __UpperCamelCase : Optional[int] = (accelerator.state.process_index + 2, 10) __UpperCamelCase : Union[str, Any] = torch.randint(0, 10, shape).to(accelerator.device) __UpperCamelCase : List[Any] = '''''' __UpperCamelCase : str = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." __UpperCamelCase : Optional[int] = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." __UpperCamelCase : List[Any] = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
361
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) __UpperCamelCase : Any = { '''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''], '''processing_layoutlmv2''': ['''LayoutLMv2Processor'''], '''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Tuple = ['''LayoutLMv2TokenizerFast'''] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Optional[int] = ['''LayoutLMv2FeatureExtractor'''] __UpperCamelCase : Optional[int] = ['''LayoutLMv2ImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Any = [ '''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LayoutLMv2ForQuestionAnswering''', '''LayoutLMv2ForSequenceClassification''', '''LayoutLMv2ForTokenClassification''', '''LayoutLMv2Layer''', '''LayoutLMv2Model''', '''LayoutLMv2PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys __UpperCamelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
309
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowerCamelCase : List[Any] = { '''configuration_xlm_roberta''': [ '''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMRobertaConfig''', '''XLMRobertaOnnxConfig''', ], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Dict = ['''XLMRobertaTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Union[str, Any] = ['''XLMRobertaTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : List[str] = [ '''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XLMRobertaForCausalLM''', '''XLMRobertaForMaskedLM''', '''XLMRobertaForMultipleChoice''', '''XLMRobertaForQuestionAnswering''', '''XLMRobertaForSequenceClassification''', '''XLMRobertaForTokenClassification''', '''XLMRobertaModel''', '''XLMRobertaPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Any = [ '''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXLMRobertaForCausalLM''', '''TFXLMRobertaForMaskedLM''', '''TFXLMRobertaForMultipleChoice''', '''TFXLMRobertaForQuestionAnswering''', '''TFXLMRobertaForSequenceClassification''', '''TFXLMRobertaForTokenClassification''', '''TFXLMRobertaModel''', '''TFXLMRobertaPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : str = [ '''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FlaxXLMRobertaForMaskedLM''', '''FlaxXLMRobertaForCausalLM''', '''FlaxXLMRobertaForMultipleChoice''', '''FlaxXLMRobertaForQuestionAnswering''', '''FlaxXLMRobertaForSequenceClassification''', '''FlaxXLMRobertaForTokenClassification''', '''FlaxXLMRobertaModel''', '''FlaxXLMRobertaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig, XLMRobertaOnnxConfig, ) try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta import XLMRobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, XLMRobertaForQuestionAnswering, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, XLMRobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm_roberta import ( TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMRobertaForCausalLM, TFXLMRobertaForMaskedLM, TFXLMRobertaForMultipleChoice, TFXLMRobertaForQuestionAnswering, TFXLMRobertaForSequenceClassification, TFXLMRobertaForTokenClassification, TFXLMRobertaModel, TFXLMRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xlm_roberta import ( FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxXLMRobertaForCausalLM, FlaxXLMRobertaForMaskedLM, FlaxXLMRobertaForMultipleChoice, FlaxXLMRobertaForQuestionAnswering, FlaxXLMRobertaForSequenceClassification, FlaxXLMRobertaForTokenClassification, FlaxXLMRobertaModel, FlaxXLMRobertaPreTrainedModel, ) else: import sys __lowerCamelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
18
'''simple docstring''' import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def _lowerCamelCase ( lowercase : Dict ) -> Any: _a = filter(lambda lowercase : p.requires_grad , model.parameters() ) _a = sum([np.prod(p.size() ) for p in model_parameters] ) return params lowerCAmelCase_ : int = logging.getLogger(__name__) def _lowerCamelCase ( lowercase : List[Any] , lowercase : Any ) -> Any: if metric == "rouge2": _a = "{val_avg_rouge2:.4f}-{step_count}" elif metric == "bleu": _a = "{val_avg_bleu:.4f}-{step_count}" elif metric == "em": _a = "{val_avg_em:.4f}-{step_count}" else: raise NotImplementedError( F'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this' " function." ) _a = ModelCheckpoint( dirpath=lowercase , filename=lowercase , monitor=F'val_{metric}' , mode="max" , save_top_k=3 , every_n_epochs=1 , ) return checkpoint_callback def _lowerCamelCase ( lowercase : Optional[int] , lowercase : Optional[int] ) -> Union[str, Any]: return EarlyStopping( monitor=F'val_{metric}' , mode="min" if "loss" in metric else "max" , patience=lowercase , verbose=lowercase , ) class __SCREAMING_SNAKE_CASE (pl.Callback ): """simple docstring""" def UpperCamelCase__ ( self : Optional[int] , __a : str , __a : List[Any] ): _a = {f'lr_group_{i}': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(__a ) @rank_zero_only def UpperCamelCase__ ( self : Optional[int] , __a : pl.Trainer , __a : pl.LightningModule , __a : str , __a : Tuple=True ): logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****' ) _a = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} ) # Log results _a = Path(pl_module.hparams.output_dir ) if type_path == "test": _a = od / "test_results.txt" _a = od / "test_generations.txt" else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. _a = od / f'{type_path}_results/{trainer.global_step:05d}.txt' _a = od / f'{type_path}_generations/{trainer.global_step:05d}.txt' results_file.parent.mkdir(exist_ok=__a ) generations_file.parent.mkdir(exist_ok=__a ) with open(__a , "a+" ) as writer: for key in sorted(__a ): if key in ["log", "progress_bar", "preds"]: continue _a = metrics[key] if isinstance(__a , torch.Tensor ): _a = val.item() _a = f'{key}: {val:.6f}\n' writer.write(__a ) if not save_generations: return if "preds" in metrics: _a = "\n".join(metrics["preds"] ) generations_file.open("w+" ).write(__a ) @rank_zero_only def UpperCamelCase__ ( self : int , __a : List[Any] , __a : Union[str, Any] ): try: _a = pl_module.model.model.num_parameters() except AttributeError: _a = pl_module.model.num_parameters() _a = count_trainable_parameters(__a ) # mp stands for million parameters trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} ) @rank_zero_only def UpperCamelCase__ ( self : Union[str, Any] , __a : pl.Trainer , __a : pl.LightningModule ): save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(__a , __a , "test" ) @rank_zero_only def UpperCamelCase__ ( self : Any , __a : pl.Trainer , __a : int ): save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
63
0
import argparse import fairseq import torch from torch import nn from transformers import ( MBartaaTokenizer, MBartConfig, MBartForCausalLM, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } UpperCamelCase = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): for attribute in key.split('''.''' ): A_ : Optional[int] = getattr(_a , _a ) if weight_type is not None: A_ : Optional[Any] = getattr(_a , _a ).shape else: A_ : Union[str, Any] = hf_pointer.shape assert hf_shape == value.shape, ( f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": A_ : Tuple = value elif weight_type == "weight_g": A_ : Optional[int] = value elif weight_type == "weight_v": A_ : Optional[Any] = value elif weight_type == "bias": A_ : Union[str, Any] = value else: A_ : Optional[Any] = value logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A_ : Any = [] A_ : List[Any] = fairseq_model.state_dict() A_ : List[str] = hf_model.feature_extractor A_ : Tuple = hf_model.adapter for name, value in fairseq_dict.items(): A_ : Tuple = False if "conv_layers" in name: load_conv_layer( _a , _a , _a , _a , hf_model.config.feat_extract_norm == '''group''' , ) A_ : Optional[int] = True elif any(x in name for x in ['''adaptor''', '''w2v_encoder.proj.''', '''w2v_proj_ln.'''] ): load_adapter(_a , _a , _a , _a ) A_ : int = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: A_ : Union[str, Any] = True if "*" in mapped_key: A_ : Dict = name.split(_a )[0].split('''.''' )[-2] A_ : Optional[int] = mapped_key.replace('''*''' , _a ) if "weight_g" in name: A_ : Optional[Any] = '''weight_g''' elif "weight_v" in name: A_ : str = '''weight_v''' elif "bias" in name: A_ : List[str] = '''bias''' elif "weight" in name: A_ : List[Any] = '''weight''' else: A_ : Any = None set_recursively(_a , _a , _a , _a , _a ) continue if not is_used: unused_weights.append(_a ) logger.warning(f'''Unused weights: {unused_weights}''' ) def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A_ : Optional[int] = full_name.split('''conv_layers.''' )[-1] A_ : List[str] = name.split('''.''' ) A_ : Optional[int] = int(items[0] ) A_ : List[Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) A_ : Optional[int] = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) A_ : Union[str, Any] = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." ) A_ : Optional[Any] = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) A_ : Optional[int] = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(_a ) def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A_ : Optional[Any] = full_name.split('''adaptor.''' )[-1] A_ : List[str] = name.split('''.''' ) if items[1].isdigit(): A_ : List[Any] = int(items[1] ) else: A_ : Optional[int] = None if "adaptor" not in full_name: if "proj_ln" in full_name: # has to be layer norm if "bias" in name: assert ( value.shape == adapter.proj_layer_norm.bias.data.shape ), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.''' A_ : List[Any] = value logger.info(f'''Adapter proj layer norm bias was initialized from {full_name}.''' ) if "weight" in name: assert ( value.shape == adapter.proj_layer_norm.weight.data.shape ), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.''' A_ : Any = value else: # has to be projection layer if "bias" in name: assert ( value.shape == adapter.proj.bias.data.shape ), f'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.''' A_ : Tuple = value logger.info(f'''Adapter proj layer bias was initialized from {full_name}.''' ) if "weight" in name: assert ( value.shape == adapter.proj.weight.data.shape ), f'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.''' A_ : Optional[Any] = value logger.info(f'''Adapter proj layer weight was initialized from {full_name}.''' ) elif isinstance(_a , _a ): if "bias" in name: assert ( value.shape == adapter.layers[layer_id].conv.bias.data.shape ), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.''' A_ : Union[str, Any] = value logger.info(f'''Adapter layer {layer_id} bias was initialized from {full_name}.''' ) elif "weight" in name: assert ( value.shape == adapter.layers[layer_id].conv.weight.data.shape ), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.''' A_ : int = value logger.info(f'''Adapter layer {layer_id} bias was initialized from {full_name}.''' ) else: unused_weights.append(_a ) def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ): A_ , A_ : int = emb.weight.shape A_ : Any = nn.Linear(_a , _a , bias=_a ) A_ : str = emb.weight.data return lin_layer @torch.no_grad() def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ): A_ : str = WavaVecaConfig.from_pretrained( _a , add_adapter=_a , adapter_stride=_a , adapter_kernel_size=_a , use_auth_token=_a , output_hidden_size=_a , ) A_ : Union[str, Any] = MBartConfig.from_pretrained(_a ) # load model A_ , A_ , A_ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={ '''config_yaml''': config_yaml_path, '''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path, '''load_pretrained_decoder_from''': None, } , ) A_ : Union[str, Any] = model[0].eval() # load feature extractor A_ : Any = WavaVecaFeatureExtractor.from_pretrained(_a , use_auth_token=_a ) # set weights for wav2vec2 encoder A_ : str = WavaVecaModel(_a ) recursively_load_weights_wavaveca(model.encoder , _a ) # load decoder weights A_ : List[str] = MBartForCausalLM(_a ) A_ , A_ : Union[str, Any] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_a ) logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''' ) logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' ) A_ : List[Any] = SpeechEncoderDecoderModel(encoder=_a , decoder=_a ) A_ : int = False A_ : Union[str, Any] = MBartaaTokenizer(_a ) tokenizer.save_pretrained(_a ) A_ : str = hf_wavavec.config.to_dict() A_ : Dict = tokenizer.pad_token_id A_ : List[Any] = tokenizer.bos_token_id A_ : Dict = tokenizer.eos_token_id A_ : Optional[int] = '''mbart50''' A_ : int = '''wav2vec2''' A_ : Any = tokenizer.eos_token_id A_ : List[Any] = 250_004 A_ : List[Any] = tokenizer.eos_token_id A_ : Dict = SpeechEncoderDecoderConfig.from_dict(_a ) hf_wavavec.save_pretrained(_a ) feature_extractor.save_pretrained(_a ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""") parser.add_argument( """--encoder_config_path""", default="""facebook/wav2vec2-xls-r-1b""", type=str, help="""Path to hf encoder wav2vec2 checkpoint config""", ) parser.add_argument( """--decoder_config_path""", default="""facebook/mbart-large-50-one-to-many-mmt""", type=str, help="""Path to hf decoder checkpoint config""", ) parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""") parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""") parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""") parser.add_argument("""--encoder_output_dim""", default=1024, type=int, help="""encoder output dim""") parser.add_argument("""--start_token_id""", default=25_0004, type=int, help="""`decoder_start_token_id` of model config""") UpperCamelCase = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, args.config_yaml_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, add_adapter=args.add_adapter, adapter_kernel_size=args.adapter_kernel_size, adapter_stride=args.adapter_stride, decoder_start_token_id=args.start_token_id, encoder_output_dim=args.encoder_output_dim, )
360
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class _lowerCamelCase : """simple docstring""" def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , )->Any: '''simple docstring''' A_ : List[Any] = parent A_ : int = batch_size A_ : str = seq_length A_ : int = is_training A_ : Any = use_token_type_ids A_ : Union[str, Any] = use_labels A_ : Any = vocab_size A_ : Dict = hidden_size A_ : Dict = num_hidden_layers A_ : int = num_attention_heads A_ : Optional[Any] = intermediate_size A_ : Dict = hidden_act A_ : List[str] = hidden_dropout_prob A_ : List[Any] = attention_probs_dropout_prob A_ : Union[str, Any] = max_position_embeddings A_ : Optional[int] = type_vocab_size A_ : str = type_sequence_label_size A_ : Tuple = initializer_range A_ : Union[str, Any] = num_labels A_ : List[str] = num_choices A_ : Union[str, Any] = scope A_ : Any = self.vocab_size - 1 def _snake_case ( self )->Any: '''simple docstring''' A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ : Any = None if self.use_token_type_ids: A_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A_ : str = None A_ : Union[str, Any] = None A_ : Optional[int] = None if self.use_labels: A_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A_ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) A_ : Optional[Any] = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) A_ : List[str] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )->Tuple: '''simple docstring''' A_ : int = OpenAIGPTModel(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() A_ : int = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE ) A_ : Tuple = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE ) A_ : Any = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )->List[str]: '''simple docstring''' A_ : int = OpenAIGPTLMHeadModel(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() A_ : Tuple = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )->Optional[int]: '''simple docstring''' A_ : List[Any] = OpenAIGPTDoubleHeadsModel(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() A_ : str = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )->str: '''simple docstring''' A_ : Any = self.num_labels A_ : List[Any] = OpenAIGPTForSequenceClassification(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() A_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ : Optional[Any] = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self )->int: '''simple docstring''' A_ : Dict = self.prepare_config_and_inputs() ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) : Optional[int] = config_and_inputs A_ : int = { '''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask, } return config, inputs_dict @require_torch class _lowerCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase ): """simple docstring""" snake_case = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) snake_case = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly snake_case = ( { "feature-extraction": OpenAIGPTModel, "text-classification": OpenAIGPTForSequenceClassification, "text-generation": OpenAIGPTLMHeadModel, "zero-shot": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Dict: '''simple docstring''' if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False )->Optional[int]: '''simple docstring''' A_ : Optional[Any] = super()._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": A_ : List[str] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=_SCREAMING_SNAKE_CASE , ) A_ : List[Any] = inputs_dict['''labels'''] A_ : Any = inputs_dict['''labels'''] A_ : Tuple = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=_SCREAMING_SNAKE_CASE , ) A_ : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE ) return inputs_dict def _snake_case ( self )->Any: '''simple docstring''' A_ : Any = OpenAIGPTModelTester(self ) A_ : int = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , n_embd=37 ) def _snake_case ( self )->Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() def _snake_case ( self )->Tuple: '''simple docstring''' A_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*_SCREAMING_SNAKE_CASE ) def _snake_case ( self )->List[Any]: '''simple docstring''' A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*_SCREAMING_SNAKE_CASE ) def _snake_case ( self )->Any: '''simple docstring''' A_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*_SCREAMING_SNAKE_CASE ) def _snake_case ( self )->Tuple: '''simple docstring''' A_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_SCREAMING_SNAKE_CASE ) @slow def _snake_case ( self )->List[str]: '''simple docstring''' for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Optional[int] = OpenAIGPTModel.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) @require_torch class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" @slow def _snake_case ( self )->Tuple: '''simple docstring''' A_ : Optional[int] = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' ) model.to(_SCREAMING_SNAKE_CASE ) A_ : Optional[int] = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=_SCREAMING_SNAKE_CASE ) # the president is A_ : Union[str, Any] = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 4_0477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the A_ : Dict = model.generate(_SCREAMING_SNAKE_CASE , do_sample=_SCREAMING_SNAKE_CASE ) self.assertListEqual(output_ids[0].tolist() , _SCREAMING_SNAKE_CASE )
65
0
"""simple docstring""" def lowerCamelCase__ ( __snake_case ) -> bool: """simple docstring""" if not isinstance(__snake_case, __snake_case ): raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' ) if len(__snake_case ) == 0: raise ValueError('''Input list must be a non empty list''' ) if len(__snake_case ) == 1: return True _UpperCamelCase = series[1] - series[0] for index in range(len(__snake_case ) - 1 ): if series[index + 1] - series[index] != common_diff: return False return True def lowerCamelCase__ ( __snake_case ) -> float: """simple docstring""" if not isinstance(__snake_case, __snake_case ): raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' ) if len(__snake_case ) == 0: raise ValueError('''Input list must be a non empty list''' ) _UpperCamelCase = 0 for val in series: answer += val return answer / len(__snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
194
"""simple docstring""" import numpy as np import torch import tqdm from ...models.unet_ad import UNetaDModel from ...pipelines import DiffusionPipeline from ...utils import randn_tensor from ...utils.dummy_pt_objects import DDPMScheduler class _UpperCAmelCase( lowerCamelCase ): def __init__( self , __a , __a , __a , __a , ) -> Optional[int]: '''simple docstring''' super().__init__() _UpperCamelCase = value_function _UpperCamelCase = unet _UpperCamelCase = scheduler _UpperCamelCase = env _UpperCamelCase = env.get_dataset() _UpperCamelCase = {} for key in self.data.keys(): try: _UpperCamelCase = self.data[key].mean() except: # noqa: E722 pass _UpperCamelCase = {} for key in self.data.keys(): try: _UpperCamelCase = self.data[key].std() except: # noqa: E722 pass _UpperCamelCase = env.observation_space.shape[0] _UpperCamelCase = env.action_space.shape[0] def UpperCAmelCase ( self , __a , __a) -> int: '''simple docstring''' return (x_in - self.means[key]) / self.stds[key] def UpperCAmelCase ( self , __a , __a) -> List[str]: '''simple docstring''' return x_in * self.stds[key] + self.means[key] def UpperCAmelCase ( self , __a) -> Union[str, Any]: '''simple docstring''' if type(__a) is dict: return {k: self.to_torch(__a) for k, v in x_in.items()} elif torch.is_tensor(__a): return x_in.to(self.unet.device) return torch.tensor(__a , device=self.unet.device) def UpperCAmelCase ( self , __a , __a , __a) -> str: '''simple docstring''' for key, val in cond.items(): _UpperCamelCase = val.clone() return x_in def UpperCAmelCase ( self , __a , __a , __a , __a) -> int: '''simple docstring''' _UpperCamelCase = x.shape[0] _UpperCamelCase = None for i in tqdm.tqdm(self.scheduler.timesteps): # create batch of timesteps to pass into model _UpperCamelCase = torch.full((batch_size,) , __a , device=self.unet.device , dtype=torch.long) for _ in range(__a): with torch.enable_grad(): x.requires_grad_() # permute to match dimension for pre-trained models _UpperCamelCase = self.value_function(x.permute(0 , 2 , 1) , __a).sample _UpperCamelCase = torch.autograd.grad([y.sum()] , [x])[0] _UpperCamelCase = self.scheduler._get_variance(__a) _UpperCamelCase = torch.exp(0.5 * posterior_variance) _UpperCamelCase = model_std * grad _UpperCamelCase = 0 _UpperCamelCase = x.detach() _UpperCamelCase = x + scale * grad _UpperCamelCase = self.reset_xa(__a , __a , self.action_dim) _UpperCamelCase = self.unet(x.permute(0 , 2 , 1) , __a).sample.permute(0 , 2 , 1) # TODO: verify deprecation of this kwarg _UpperCamelCase = self.scheduler.step(__a , __a , __a , predict_epsilon=__a)['''prev_sample'''] # apply conditions to the trajectory (set the initial state) _UpperCamelCase = self.reset_xa(__a , __a , self.action_dim) _UpperCamelCase = self.to_torch(__a) return x, y def __call__( self , __a , __a=64 , __a=32 , __a=2 , __a=0.1) -> Optional[Any]: '''simple docstring''' # normalize the observations and create batch dimension _UpperCamelCase = self.normalize(__a , '''observations''') _UpperCamelCase = obs[None].repeat(__a , axis=0) _UpperCamelCase = {0: self.to_torch(__a)} _UpperCamelCase = (batch_size, planning_horizon, self.state_dim + self.action_dim) # generate initial noise and apply our conditions (to make the trajectories start at current state) _UpperCamelCase = randn_tensor(__a , device=self.unet.device) _UpperCamelCase = self.reset_xa(__a , __a , self.action_dim) _UpperCamelCase = self.to_torch(__a) # run the diffusion process _UpperCamelCase , _UpperCamelCase = self.run_diffusion(__a , __a , __a , __a) # sort output trajectories by value _UpperCamelCase = y.argsort(0 , descending=__a).squeeze() _UpperCamelCase = x[sorted_idx] _UpperCamelCase = sorted_values[:, :, : self.action_dim] _UpperCamelCase = actions.detach().cpu().numpy() _UpperCamelCase = self.de_normalize(__a , key='''actions''') # select the action with the highest value if y is not None: _UpperCamelCase = 0 else: # if we didn't run value guiding, select a random action _UpperCamelCase = np.random.randint(0 , __a) _UpperCamelCase = denorm_actions[selected_index, 0] return denorm_actions
194
1
'''simple docstring''' from __future__ import annotations a_ : Union[str, Any] = [] def a_ ( __snake_case : list[list[int]] , __snake_case : int , __snake_case : int ) -> bool: """simple docstring""" for i in range(len(__snake_case ) ): if board[row][i] == 1: return False for i in range(len(__snake_case ) ): if board[i][column] == 1: return False for i, j in zip(range(__snake_case , -1 , -1 ) , range(__snake_case , -1 , -1 ) ): if board[i][j] == 1: return False for i, j in zip(range(__snake_case , -1 , -1 ) , range(__snake_case , len(__snake_case ) ) ): if board[i][j] == 1: return False return True def a_ ( __snake_case : list[list[int]] , __snake_case : int ) -> bool: """simple docstring""" if row >= len(__snake_case ): solution.append(__snake_case ) printboard(__snake_case ) print() return True for i in range(len(__snake_case ) ): if is_safe(__snake_case , __snake_case , __snake_case ): lowerCamelCase_ =1 solve(__snake_case , row + 1 ) lowerCamelCase_ =0 return False def a_ ( __snake_case : list[list[int]] ) -> None: """simple docstring""" for i in range(len(__snake_case ) ): for j in range(len(__snake_case ) ): if board[i][j] == 1: print('''Q''' , end=''' ''' ) else: print('''.''' , end=''' ''' ) print() # n=int(input("The no. of queens")) a_ : List[str] = 8 a_ : Tuple = [[0 for i in range(n)] for j in range(n)] solve(board, 0) print("""The total no. of solutions are :""", len(solution))
6
'''simple docstring''' from collections import defaultdict from math import gcd def a_ ( __snake_case : int = 150_0000 ) -> int: """simple docstring""" lowerCamelCase_ =defaultdict(__snake_case ) lowerCamelCase_ =2 while 2 * euclid_m * (euclid_m + 1) <= limit: for euclid_n in range((euclid_m % 2) + 1 , __snake_case , 2 ): if gcd(__snake_case , __snake_case ) > 1: continue lowerCamelCase_ =2 * euclid_m * (euclid_m + euclid_n) for perimeter in range(__snake_case , limit + 1 , __snake_case ): frequencies[perimeter] += 1 euclid_m += 1 return sum(1 for frequency in frequencies.values() if frequency == 1 ) if __name__ == "__main__": print(F"""{solution() = }""")
6
1
"""simple docstring""" def a__ ( snake_case__ ) -> list: lowerCamelCase = [0] * len(snake_case__ ) for i in range(1 , len(snake_case__ ) ): # use last results for better performance - dynamic programming lowerCamelCase = prefix_result[i - 1] while j > 0 and input_string[i] != input_string[j]: lowerCamelCase = prefix_result[j - 1] if input_string[i] == input_string[j]: j += 1 lowerCamelCase = j return prefix_result def a__ ( snake_case__ ) -> int: return max(prefix_function(snake_case__ ) ) if __name__ == "__main__": import doctest doctest.testmod()
291
"""simple docstring""" from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch lowerCAmelCase : int = logging.get_logger(__name__) class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = ["pixel_values"] def __init__( self , _a = True , _a = None , _a = PILImageResampling.BILINEAR , _a = True , _a = None , _a = True , _a = 1 / 255 , _a = True , _a = None , _a = None , **_a , ): """simple docstring""" super().__init__(**_a ) lowerCamelCase = size if size is not None else {"""shortest_edge""": 256} lowerCamelCase = get_size_dict(_a , default_to_square=_a ) lowerCamelCase = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} lowerCamelCase = get_size_dict(_a , param_name="""crop_size""" ) lowerCamelCase = do_resize lowerCamelCase = size lowerCamelCase = resample lowerCamelCase = do_center_crop lowerCamelCase = crop_size lowerCamelCase = do_rescale lowerCamelCase = rescale_factor lowerCamelCase = do_normalize lowerCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowerCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowerCAmelCase ( self , _a , _a , _a = PILImageResampling.BICUBIC , _a = None , **_a , ): """simple docstring""" lowerCamelCase = get_size_dict(_a , default_to_square=_a ) if "shortest_edge" not in size: raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) lowerCamelCase = get_resize_output_image_size(_a , size=size["""shortest_edge"""] , default_to_square=_a ) return resize(_a , size=_a , resample=_a , data_format=_a , **_a ) def _lowerCAmelCase ( self , _a , _a , _a = None , **_a , ): """simple docstring""" lowerCamelCase = get_size_dict(_a ) if "height" not in size or "width" not in size: raise ValueError(f'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' ) return center_crop(_a , size=(size["""height"""], size["""width"""]) , data_format=_a , **_a ) def _lowerCAmelCase ( self , _a , _a , _a = None , **_a ): """simple docstring""" return rescale(_a , scale=_a , data_format=_a , **_a ) def _lowerCAmelCase ( self , _a , _a , _a , _a = None , **_a , ): """simple docstring""" return normalize(_a , mean=_a , std=_a , data_format=_a , **_a ) def _lowerCAmelCase ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ): """simple docstring""" lowerCamelCase = do_resize if do_resize is not None else self.do_resize lowerCamelCase = size if size is not None else self.size lowerCamelCase = get_size_dict(_a , default_to_square=_a ) lowerCamelCase = resample if resample is not None else self.resample lowerCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop lowerCamelCase = crop_size if crop_size is not None else self.crop_size lowerCamelCase = get_size_dict(_a , param_name="""crop_size""" ) lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize lowerCamelCase = image_mean if image_mean is not None else self.image_mean lowerCamelCase = image_std if image_std is not None else self.image_std lowerCamelCase = make_list_of_images(_a ) if not valid_images(_a ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. lowerCamelCase = [to_numpy_array(_a ) for image in images] if do_resize: lowerCamelCase = [self.resize(image=_a , size=_a , resample=_a ) for image in images] if do_center_crop: lowerCamelCase = [self.center_crop(image=_a , size=_a ) for image in images] if do_rescale: lowerCamelCase = [self.rescale(image=_a , scale=_a ) for image in images] if do_normalize: lowerCamelCase = [self.normalize(image=_a , mean=_a , std=_a ) for image in images] lowerCamelCase = [to_channel_dimension_format(_a , _a ) for image in images] lowerCamelCase = {"""pixel_values""": images} return BatchFeature(data=_a , tensor_type=_a ) def _lowerCAmelCase ( self , _a , _a = None ): """simple docstring""" lowerCamelCase = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(_a ) != len(_a ): raise ValueError( """Make sure that you pass in as many target sizes as the batch dimension of the logits""" ) if is_torch_tensor(_a ): lowerCamelCase = target_sizes.numpy() lowerCamelCase = [] for idx in range(len(_a ) ): lowerCamelCase = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=_a ) lowerCamelCase = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(_a ) else: lowerCamelCase = logits.argmax(dim=1 ) lowerCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
291
1
import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all image processors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...image_processing_utils import ImageProcessingMixin from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) _SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE : Tuple = OrderedDict( [ ('''align''', '''EfficientNetImageProcessor'''), ('''beit''', '''BeitImageProcessor'''), ('''bit''', '''BitImageProcessor'''), ('''blip''', '''BlipImageProcessor'''), ('''blip-2''', '''BlipImageProcessor'''), ('''bridgetower''', '''BridgeTowerImageProcessor'''), ('''chinese_clip''', '''ChineseCLIPImageProcessor'''), ('''clip''', '''CLIPImageProcessor'''), ('''clipseg''', '''ViTImageProcessor'''), ('''conditional_detr''', '''ConditionalDetrImageProcessor'''), ('''convnext''', '''ConvNextImageProcessor'''), ('''convnextv2''', '''ConvNextImageProcessor'''), ('''cvt''', '''ConvNextImageProcessor'''), ('''data2vec-vision''', '''BeitImageProcessor'''), ('''deformable_detr''', '''DeformableDetrImageProcessor'''), ('''deit''', '''DeiTImageProcessor'''), ('''deta''', '''DetaImageProcessor'''), ('''detr''', '''DetrImageProcessor'''), ('''dinat''', '''ViTImageProcessor'''), ('''donut-swin''', '''DonutImageProcessor'''), ('''dpt''', '''DPTImageProcessor'''), ('''efficientformer''', '''EfficientFormerImageProcessor'''), ('''efficientnet''', '''EfficientNetImageProcessor'''), ('''flava''', '''FlavaImageProcessor'''), ('''focalnet''', '''BitImageProcessor'''), ('''git''', '''CLIPImageProcessor'''), ('''glpn''', '''GLPNImageProcessor'''), ('''groupvit''', '''CLIPImageProcessor'''), ('''imagegpt''', '''ImageGPTImageProcessor'''), ('''instructblip''', '''BlipImageProcessor'''), ('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''), ('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''), ('''levit''', '''LevitImageProcessor'''), ('''mask2former''', '''Mask2FormerImageProcessor'''), ('''maskformer''', '''MaskFormerImageProcessor'''), ('''mgp-str''', '''ViTImageProcessor'''), ('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''), ('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''), ('''mobilevit''', '''MobileViTImageProcessor'''), ('''mobilevit''', '''MobileViTImageProcessor'''), ('''mobilevitv2''', '''MobileViTImageProcessor'''), ('''nat''', '''ViTImageProcessor'''), ('''oneformer''', '''OneFormerImageProcessor'''), ('''owlvit''', '''OwlViTImageProcessor'''), ('''perceiver''', '''PerceiverImageProcessor'''), ('''pix2struct''', '''Pix2StructImageProcessor'''), ('''poolformer''', '''PoolFormerImageProcessor'''), ('''regnet''', '''ConvNextImageProcessor'''), ('''resnet''', '''ConvNextImageProcessor'''), ('''sam''', '''SamImageProcessor'''), ('''segformer''', '''SegformerImageProcessor'''), ('''swiftformer''', '''ViTImageProcessor'''), ('''swin''', '''ViTImageProcessor'''), ('''swin2sr''', '''Swin2SRImageProcessor'''), ('''swinv2''', '''ViTImageProcessor'''), ('''table-transformer''', '''DetrImageProcessor'''), ('''timesformer''', '''VideoMAEImageProcessor'''), ('''tvlt''', '''TvltImageProcessor'''), ('''upernet''', '''SegformerImageProcessor'''), ('''van''', '''ConvNextImageProcessor'''), ('''videomae''', '''VideoMAEImageProcessor'''), ('''vilt''', '''ViltImageProcessor'''), ('''vit''', '''ViTImageProcessor'''), ('''vit_hybrid''', '''ViTHybridImageProcessor'''), ('''vit_mae''', '''ViTImageProcessor'''), ('''vit_msn''', '''ViTImageProcessor'''), ('''xclip''', '''CLIPImageProcessor'''), ('''yolos''', '''YolosImageProcessor'''), ] ) _SCREAMING_SNAKE_CASE : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES) def UpperCAmelCase_ ( _A ): '''simple docstring''' for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items(): if class_name in extractors: SCREAMING_SNAKE_CASE__ = model_type_to_module_name(_A ) SCREAMING_SNAKE_CASE__ = importlib.import_module(F'''.{module_name}''' , '''transformers.models''' ) try: return getattr(_A , _A ) except AttributeError: continue for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items(): if getattr(_A , '''__name__''' , _A ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. SCREAMING_SNAKE_CASE__ = importlib.import_module('''transformers''' ) if hasattr(_A , _A ): return getattr(_A , _A ) return None def UpperCAmelCase_ ( _A , _A = None , _A = False , _A = False , _A = None , _A = None , _A = None , _A = False , **_A , ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = get_file_from_repo( _A , _A , cache_dir=_A , force_download=_A , resume_download=_A , proxies=_A , use_auth_token=_A , revision=_A , local_files_only=_A , ) if resolved_config_file is None: logger.info( '''Could not locate the image processor configuration file, will try to use the model config instead.''' ) return {} with open(_A , encoding='''utf-8''' ) as reader: return json.load(_A ) class UpperCAmelCase__ : """simple docstring""" def __init__( self : List[Any] ) -> int: raise EnvironmentError( '''AutoImageProcessor is designed to be instantiated ''' '''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' ) @classmethod @replace_list_option_in_docstrings(__lowerCamelCase ) def lowercase_ ( cls : Optional[int] , __lowerCamelCase : Any , **__lowerCamelCase : Tuple ) -> List[str]: SCREAMING_SNAKE_CASE__ = kwargs.pop('''config''' , __lowerCamelCase ) SCREAMING_SNAKE_CASE__ = kwargs.pop('''trust_remote_code''' , __lowerCamelCase ) SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = ImageProcessingMixin.get_image_processor_dict(__lowerCamelCase , **__lowerCamelCase ) SCREAMING_SNAKE_CASE__ = config_dict.get('''image_processor_type''' , __lowerCamelCase ) SCREAMING_SNAKE_CASE__ = None if "AutoImageProcessor" in config_dict.get('''auto_map''' , {} ): SCREAMING_SNAKE_CASE__ = config_dict['''auto_map''']['''AutoImageProcessor'''] # If we still don't have the image processor class, check if we're loading from a previous feature extractor config # and if so, infer the image processor class from there. if image_processor_class is None and image_processor_auto_map is None: SCREAMING_SNAKE_CASE__ = config_dict.pop('''feature_extractor_type''' , __lowerCamelCase ) if feature_extractor_class is not None: logger.warning( '''Could not find image processor class in the image processor config or the model config. Loading''' ''' based on pattern matching with the model\'s feature extractor configuration.''' ) SCREAMING_SNAKE_CASE__ = feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''' ) if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ): SCREAMING_SNAKE_CASE__ = config_dict['''auto_map''']['''AutoFeatureExtractor'''] SCREAMING_SNAKE_CASE__ = feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''' ) logger.warning( '''Could not find image processor auto map in the image processor config or the model config.''' ''' Loading based on pattern matching with the model\'s feature extractor configuration.''' ) # If we don't find the image processor class in the image processor config, let's try the model config. if image_processor_class is None and image_processor_auto_map is None: if not isinstance(__lowerCamelCase , __lowerCamelCase ): SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(__lowerCamelCase , **__lowerCamelCase ) # It could be in `config.image_processor_type`` SCREAMING_SNAKE_CASE__ = getattr(__lowerCamelCase , '''image_processor_type''' , __lowerCamelCase ) if hasattr(__lowerCamelCase , '''auto_map''' ) and "AutoImageProcessor" in config.auto_map: SCREAMING_SNAKE_CASE__ = config.auto_map['''AutoImageProcessor'''] if image_processor_class is not None: SCREAMING_SNAKE_CASE__ = image_processor_class_from_name(__lowerCamelCase ) SCREAMING_SNAKE_CASE__ = image_processor_auto_map is not None SCREAMING_SNAKE_CASE__ = image_processor_class is not None or type(__lowerCamelCase ) in IMAGE_PROCESSOR_MAPPING SCREAMING_SNAKE_CASE__ = resolve_trust_remote_code( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if has_remote_code and trust_remote_code: SCREAMING_SNAKE_CASE__ = get_class_from_dynamic_module( __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ) SCREAMING_SNAKE_CASE__ = kwargs.pop('''code_revision''' , __lowerCamelCase ) if os.path.isdir(__lowerCamelCase ): image_processor_class.register_for_auto_class() return image_processor_class.from_dict(__lowerCamelCase , **__lowerCamelCase ) elif image_processor_class is not None: return image_processor_class.from_dict(__lowerCamelCase , **__lowerCamelCase ) # Last try: we use the IMAGE_PROCESSOR_MAPPING. elif type(__lowerCamelCase ) in IMAGE_PROCESSOR_MAPPING: SCREAMING_SNAKE_CASE__ = IMAGE_PROCESSOR_MAPPING[type(__lowerCamelCase )] return image_processor_class.from_dict(__lowerCamelCase , **__lowerCamelCase ) raise ValueError( f'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a ''' f'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following ''' f'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' ) @staticmethod def lowercase_ ( __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] ) -> str: IMAGE_PROCESSOR_MAPPING.register(__lowerCamelCase , __lowerCamelCase )
366
from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE : Tuple = { '''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''', } class UpperCAmelCase__ ( A__ ): """simple docstring""" a = "transfo-xl" a = ["mems"] a = { "n_token": "vocab_size", "hidden_size": "d_model", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : Any , __lowerCamelCase : int=26_7735 , __lowerCamelCase : Any=[2_0000, 4_0000, 20_0000] , __lowerCamelCase : Dict=1024 , __lowerCamelCase : Optional[int]=1024 , __lowerCamelCase : List[Any]=16 , __lowerCamelCase : Union[str, Any]=64 , __lowerCamelCase : Dict=4096 , __lowerCamelCase : int=4 , __lowerCamelCase : Dict=False , __lowerCamelCase : Tuple=18 , __lowerCamelCase : Optional[int]=1600 , __lowerCamelCase : str=1000 , __lowerCamelCase : Any=True , __lowerCamelCase : Dict=True , __lowerCamelCase : List[Any]=0 , __lowerCamelCase : int=-1 , __lowerCamelCase : Tuple=True , __lowerCamelCase : List[Any]=0.1 , __lowerCamelCase : int=0.0 , __lowerCamelCase : int=True , __lowerCamelCase : str="normal" , __lowerCamelCase : List[str]=0.01 , __lowerCamelCase : Any=0.01 , __lowerCamelCase : List[str]=0.02 , __lowerCamelCase : List[str]=1e-5 , __lowerCamelCase : Union[str, Any]=0 , **__lowerCamelCase : int , ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ = vocab_size SCREAMING_SNAKE_CASE__ = [] self.cutoffs.extend(__lowerCamelCase ) if proj_share_all_but_first: SCREAMING_SNAKE_CASE__ = [False] + [True] * len(self.cutoffs ) else: SCREAMING_SNAKE_CASE__ = [False] + [False] * len(self.cutoffs ) SCREAMING_SNAKE_CASE__ = d_model SCREAMING_SNAKE_CASE__ = d_embed SCREAMING_SNAKE_CASE__ = d_head SCREAMING_SNAKE_CASE__ = d_inner SCREAMING_SNAKE_CASE__ = div_val SCREAMING_SNAKE_CASE__ = pre_lnorm SCREAMING_SNAKE_CASE__ = n_layer SCREAMING_SNAKE_CASE__ = n_head SCREAMING_SNAKE_CASE__ = mem_len SCREAMING_SNAKE_CASE__ = same_length SCREAMING_SNAKE_CASE__ = attn_type SCREAMING_SNAKE_CASE__ = clamp_len SCREAMING_SNAKE_CASE__ = sample_softmax SCREAMING_SNAKE_CASE__ = adaptive SCREAMING_SNAKE_CASE__ = dropout SCREAMING_SNAKE_CASE__ = dropatt SCREAMING_SNAKE_CASE__ = untie_r SCREAMING_SNAKE_CASE__ = init SCREAMING_SNAKE_CASE__ = init_range SCREAMING_SNAKE_CASE__ = proj_init_std SCREAMING_SNAKE_CASE__ = init_std SCREAMING_SNAKE_CASE__ = layer_norm_epsilon super().__init__(eos_token_id=__lowerCamelCase , **__lowerCamelCase ) @property def lowercase_ ( self : str ) -> Dict: # Message copied from Transformer-XL documentation logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def lowercase_ ( self : List[str] , __lowerCamelCase : Any ) -> List[Any]: # Message copied from Transformer-XL documentation raise NotImplementedError( f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
218
0
'''simple docstring''' class lowerCAmelCase__ : """simple docstring""" def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = name __SCREAMING_SNAKE_CASE = value __SCREAMING_SNAKE_CASE = weight def __repr__( self : str ) -> Union[str, Any]: """simple docstring""" return f'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})' def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" return self.value def UpperCAmelCase__ ( self : Any ) -> str: """simple docstring""" return self.name def UpperCAmelCase__ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" return self.weight def UpperCAmelCase__ ( self : int ) -> Tuple: """simple docstring""" return self.value / self.weight def a__ ( a__ , a__ , a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = [] for i in range(len(a__ ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def a__ ( a__ , a__ , a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = sorted(a__ , key=a__ , reverse=a__ ) __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0.0, 0.0 for i in range(len(a__ ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def a__ ( ): """simple docstring""" if __name__ == "__main__": import doctest doctest.testmod()
267
'''simple docstring''' def a__ ( a__ ): """simple docstring""" if isinstance(a__ , a__ ): raise TypeError("""'float' object cannot be interpreted as an integer""" ) if isinstance(a__ , a__ ): raise TypeError("""'str' object cannot be interpreted as an integer""" ) if num == 0: return "0b0" __SCREAMING_SNAKE_CASE = False if num < 0: __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = -num __SCREAMING_SNAKE_CASE = [] while num > 0: binary.insert(0 , num % 2 ) num >>= 1 if negative: return "-0b" + "".join(str(a__ ) for e in binary ) return "0b" + "".join(str(a__ ) for e in binary ) if __name__ == "__main__": import doctest doctest.testmod()
267
1
from math import atan, cos, radians, sin, tan from .haversine_distance import haversine_distance __A = 6_378_137.0 __A = 6_356_752.314_245 __A = 6378137 def __A ( _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' _A = (AXIS_A - AXIS_B) / AXIS_A # Parametric latitudes # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude _A = atan((1 - flattening) * tan(radians(_lowercase ) ) ) _A = atan((1 - flattening) * tan(radians(_lowercase ) ) ) # Compute central angle between two points # using haversine theta. sigma = haversine_distance / equatorial radius _A = haversine_distance(_lowercase , _lowercase , _lowercase , _lowercase ) / EQUATORIAL_RADIUS # Intermediate P and Q values _A = (b_lata + b_lata) / 2 _A = (b_lata - b_lata) / 2 # Intermediate X value # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2) _A = (sin(_lowercase ) ** 2) * (cos(_lowercase ) ** 2) _A = cos(sigma / 2 ) ** 2 _A = (sigma - sin(_lowercase )) * (x_numerator / x_demonimator) # Intermediate Y value # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2) _A = (cos(_lowercase ) ** 2) * (sin(_lowercase ) ** 2) _A = sin(sigma / 2 ) ** 2 _A = (sigma + sin(_lowercase )) * (y_numerator / y_denominator) return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value))) if __name__ == "__main__": import doctest doctest.testmod()
75
import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @property def __A ( self: Dict ) -> Union[str, Any]: torch.manual_seed(0 ) _A = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def __A ( self: Any ) -> Union[str, Any]: _A = self.dummy_uncond_unet _A = ScoreSdeVeScheduler() _A = ScoreSdeVePipeline(unet=__A , scheduler=__A ) sde_ve.to(__A ) sde_ve.set_progress_bar_config(disable=__A ) _A = torch.manual_seed(0 ) _A = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=__A ).images _A = torch.manual_seed(0 ) _A = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=__A , return_dict=__A )[ 0 ] _A = image[0, -3:, -3:, -1] _A = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) _A = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __A ( self: Dict ) -> Any: _A = '''google/ncsnpp-church-256''' _A = UNetaDModel.from_pretrained(__A ) _A = ScoreSdeVeScheduler.from_pretrained(__A ) _A = ScoreSdeVePipeline(unet=__A , scheduler=__A ) sde_ve.to(__A ) sde_ve.set_progress_bar_config(disable=__A ) _A = torch.manual_seed(0 ) _A = sde_ve(num_inference_steps=10 , output_type='''numpy''' , generator=__A ).images _A = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) _A = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
75
1
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = "▁" SCREAMING_SNAKE_CASE__ = {"vocab_file": "sentencepiece.bpe.model"} SCREAMING_SNAKE_CASE__ = { "vocab_file": { "facebook/nllb-200-distilled-600M": ( "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model" ), } } SCREAMING_SNAKE_CASE__ = { "facebook/nllb-200-distilled-600M": 1_024, } # fmt: off SCREAMING_SNAKE_CASE__ = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"] class lowercase ( _UpperCAmelCase ): _SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask'] _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = [] def __init__( self , lowercase , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase=None , lowercase=None , lowercase=None , lowercase = None , lowercase=None , lowercase=False , **lowercase , ) -> List[Any]: # Mask token behave like a normal word, i.e. include the space before it lowerCAmelCase = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else mask_token lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs lowerCAmelCase = legacy_behaviour super().__init__( bos_token=lowercase , eos_token=lowercase , unk_token=lowercase , sep_token=lowercase , cls_token=lowercase , pad_token=lowercase , mask_token=lowercase , tokenizer_file=lowercase , src_lang=lowercase , tgt_lang=lowercase , additional_special_tokens=lowercase , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=lowercase , **lowercase , ) lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(lowercase ) ) lowerCAmelCase = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' # spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s' # Mimic fairseq token-to-id alignment for the first 4 token lowerCAmelCase = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab lowerCAmelCase = 1 lowerCAmelCase = len(self.sp_model ) lowerCAmelCase = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowercase ) } lowerCAmelCase = {v: k for k, v in self.lang_code_to_id.items()} lowerCAmelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) lowerCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} lowerCAmelCase = list(self.lang_code_to_id.keys() ) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens] ) lowerCAmelCase = src_lang if src_lang is not None else """eng_Latn""" lowerCAmelCase = self.lang_code_to_id[self._src_lang] lowerCAmelCase = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self ) -> Tuple: lowerCAmelCase = self.__dict__.copy() lowerCAmelCase = None lowerCAmelCase = self.sp_model.serialized_model_proto() return state def __setstate__( self , lowercase ) -> int: lowerCAmelCase = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): lowerCAmelCase = {} lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) @property def _snake_case ( self ) -> Optional[int]: return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def _snake_case ( self ) -> str: return self._src_lang @src_lang.setter def _snake_case ( self , lowercase ) -> None: lowerCAmelCase = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def _snake_case ( self , lowercase , lowercase = None , lowercase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowercase , token_ids_a=lowercase , already_has_special_tokens=lowercase ) lowerCAmelCase = [1] * len(self.prefix_tokens ) lowerCAmelCase = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(lowercase )) + suffix_ones return prefix_ones + ([0] * len(lowercase )) + ([0] * len(lowercase )) + suffix_ones def _snake_case ( self , lowercase , lowercase = None ) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _snake_case ( self , lowercase , lowercase = None ) -> List[int]: lowerCAmelCase = [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , **lowercase ) -> Optional[Any]: if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" ) lowerCAmelCase = src_lang lowerCAmelCase = self(lowercase , add_special_tokens=lowercase , return_tensors=lowercase , **lowercase ) lowerCAmelCase = self.convert_tokens_to_ids(lowercase ) lowerCAmelCase = tgt_lang_id return inputs def _snake_case ( self ) -> Dict: lowerCAmelCase = {self.convert_ids_to_tokens(lowercase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _snake_case ( self , lowercase ) -> List[str]: return self.sp_model.encode(lowercase , out_type=lowercase ) def _snake_case ( self , lowercase ) -> Optional[Any]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowerCAmelCase = self.sp_model.PieceToId(lowercase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _snake_case ( self , lowercase ) -> Tuple: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _snake_case ( self , lowercase ) -> List[str]: lowerCAmelCase = """""".join(lowercase ).replace(lowercase , """ """ ).strip() return out_string def _snake_case ( self , lowercase , lowercase = None ) -> Tuple[str]: if not os.path.isdir(lowercase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return lowerCAmelCase = os.path.join( lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowercase ) elif not os.path.isfile(self.vocab_file ): with open(lowercase , """wb""" ) as fi: lowerCAmelCase = self.sp_model.serialized_model_proto() fi.write(lowercase ) return (out_vocab_file,) def _snake_case ( self , lowercase , lowercase = "eng_Latn" , lowercase = None , lowercase = "fra_Latn" , **lowercase , ) -> BatchEncoding: lowerCAmelCase = src_lang lowerCAmelCase = tgt_lang return super().prepare_seqaseq_batch(lowercase , lowercase , **lowercase ) def _snake_case ( self ) -> Any: return self.set_src_lang_special_tokens(self.src_lang ) def _snake_case ( self ) -> Union[str, Any]: return self.set_tgt_lang_special_tokens(self.tgt_lang ) def _snake_case ( self , lowercase ) -> None: lowerCAmelCase = self.lang_code_to_id[src_lang] if self.legacy_behaviour: lowerCAmelCase = [] lowerCAmelCase = [self.eos_token_id, self.cur_lang_code] else: lowerCAmelCase = [self.cur_lang_code] lowerCAmelCase = [self.eos_token_id] def _snake_case ( self , lowercase ) -> None: lowerCAmelCase = self.lang_code_to_id[lang] if self.legacy_behaviour: lowerCAmelCase = [] lowerCAmelCase = [self.eos_token_id, self.cur_lang_code] else: lowerCAmelCase = [self.cur_lang_code] lowerCAmelCase = [self.eos_token_id]
46
"""simple docstring""" import re import string import numpy as np import datasets SCREAMING_SNAKE_CASE__ = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n" SCREAMING_SNAKE_CASE__ = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n" SCREAMING_SNAKE_CASE__ = "\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase ( datasets.Metric ): def _snake_case ( self ) -> Optional[int]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , reference_urls=[] , ) def _snake_case ( self , lowercase , lowercase , lowercase=None , lowercase=False , lowercase=False , lowercase=False , ) -> Optional[Any]: if regexes_to_ignore is not None: for s in regexes_to_ignore: lowerCAmelCase = np.array([re.sub(lowercase , """""" , lowercase ) for x in predictions] ) lowerCAmelCase = np.array([re.sub(lowercase , """""" , lowercase ) for x in references] ) else: lowerCAmelCase = np.asarray(lowercase ) lowerCAmelCase = np.asarray(lowercase ) if ignore_case: lowerCAmelCase = np.char.lower(lowercase ) lowerCAmelCase = np.char.lower(lowercase ) if ignore_punctuation: lowerCAmelCase = string.punctuation.maketrans("""""" , """""" , string.punctuation ) lowerCAmelCase = np.char.translate(lowercase , table=lowercase ) lowerCAmelCase = np.char.translate(lowercase , table=lowercase ) if ignore_numbers: lowerCAmelCase = string.digits.maketrans("""""" , """""" , string.digits ) lowerCAmelCase = np.char.translate(lowercase , table=lowercase ) lowerCAmelCase = np.char.translate(lowercase , table=lowercase ) lowerCAmelCase = predictions == references return {"exact_match": np.mean(lowercase ) * 100}
46
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) UpperCamelCase_ = { "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"], "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"], "processing_wav2vec2": ["Wav2Vec2Processor"], "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST", "Wav2Vec2ForAudioFrameClassification", "Wav2Vec2ForCTC", "Wav2Vec2ForMaskedLM", "Wav2Vec2ForPreTraining", "Wav2Vec2ForSequenceClassification", "Wav2Vec2ForXVector", "Wav2Vec2Model", "Wav2Vec2PreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST", "TFWav2Vec2ForCTC", "TFWav2Vec2Model", "TFWav2Vec2PreTrainedModel", "TFWav2Vec2ForSequenceClassification", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "FlaxWav2Vec2ForCTC", "FlaxWav2Vec2ForPreTraining", "FlaxWav2Vec2Model", "FlaxWav2Vec2PreTrainedModel", ] if TYPE_CHECKING: from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .processing_wavaveca import WavaVecaProcessor from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavaveca import ( WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaForAudioFrameClassification, WavaVecaForCTC, WavaVecaForMaskedLM, WavaVecaForPreTraining, WavaVecaForSequenceClassification, WavaVecaForXVector, WavaVecaModel, WavaVecaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, TFWavaVecaForCTC, TFWavaVecaForSequenceClassification, TFWavaVecaModel, TFWavaVecaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( FlaxWavaVecaForCTC, FlaxWavaVecaForPreTraining, FlaxWavaVecaModel, FlaxWavaVecaPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
356
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase_ = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "OPT_PRETRAINED_MODEL_ARCHIVE_LIST", "OPTForCausalLM", "OPTModel", "OPTPreTrainedModel", "OPTForSequenceClassification", "OPTForQuestionAnswering", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "FlaxOPTForCausalLM", "FlaxOPTModel", "FlaxOPTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_opt import ( OPT_PRETRAINED_MODEL_ARCHIVE_LIST, OPTForCausalLM, OPTForQuestionAnswering, OPTForSequenceClassification, OPTModel, OPTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
344
0
"""simple docstring""" from __future__ import annotations def a__ ( _SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase = str(_A ) return len(_A ) == 9 and set(_A ) == set("123456789" ) def a__ ( ): """simple docstring""" for base_num in range(9_999 , 4_999 , -1 ): UpperCamelCase = 100_002 * base_num if is_9_pandigital(_A ): return candidate for base_num in range(333 , 99 , -1 ): UpperCamelCase = 1_002_003 * base_num if is_9_pandigital(_A ): return candidate return None if __name__ == "__main__": print(f'''{solution() = }''')
153
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase: List[Any] = logging.get_logger(__name__) lowerCAmelCase: List[Any] = { 'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json', 'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json', 'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json', 'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json', 'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json', 'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json', } class a__( lowerCamelCase__ ): lowercase__ = """roberta""" def __init__( self : Tuple , __snake_case : List[str]=5_02_65 , __snake_case : int=7_68 , __snake_case : Union[str, Any]=12 , __snake_case : Dict=12 , __snake_case : Tuple=30_72 , __snake_case : Optional[Any]="gelu" , __snake_case : str=0.1 , __snake_case : Any=0.1 , __snake_case : str=5_12 , __snake_case : int=2 , __snake_case : Any=0.02 , __snake_case : int=1e-1_2 , __snake_case : str=1 , __snake_case : Union[str, Any]=0 , __snake_case : Tuple=2 , __snake_case : Optional[int]="absolute" , __snake_case : Union[str, Any]=True , __snake_case : Union[str, Any]=None , **__snake_case : str , ): super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case ) a : List[str] = vocab_size a : str = hidden_size a : Tuple = num_hidden_layers a : Dict = num_attention_heads a : List[Any] = hidden_act a : str = intermediate_size a : Union[str, Any] = hidden_dropout_prob a : Optional[Any] = attention_probs_dropout_prob a : Any = max_position_embeddings a : Optional[int] = type_vocab_size a : str = initializer_range a : List[Any] = layer_norm_eps a : Optional[int] = position_embedding_type a : Dict = use_cache a : Any = classifier_dropout class a__( lowerCamelCase__ ): @property def lowercase_ ( self : int ): if self.task == "multiple-choice": a : Optional[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'} else: a : str = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
297
0
from math import cos, sin, sqrt, tau from audio_filters.iir_filter import IIRFilter def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: float = 1 / sqrt(2 ) ): '''simple docstring''' lowercase_ = tau * frequency / samplerate lowercase_ = sin(__lowerCamelCase ) lowercase_ = cos(__lowerCamelCase ) lowercase_ = _sin / (2 * q_factor) lowercase_ = (1 - _cos) / 2 lowercase_ = 1 - _cos lowercase_ = 1 + alpha lowercase_ = -2 * _cos lowercase_ = 1 - alpha lowercase_ = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: float = 1 / sqrt(2 ) ): '''simple docstring''' lowercase_ = tau * frequency / samplerate lowercase_ = sin(__lowerCamelCase ) lowercase_ = cos(__lowerCamelCase ) lowercase_ = _sin / (2 * q_factor) lowercase_ = (1 + _cos) / 2 lowercase_ = -1 - _cos lowercase_ = 1 + alpha lowercase_ = -2 * _cos lowercase_ = 1 - alpha lowercase_ = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: float = 1 / sqrt(2 ) ): '''simple docstring''' lowercase_ = tau * frequency / samplerate lowercase_ = sin(__lowerCamelCase ) lowercase_ = cos(__lowerCamelCase ) lowercase_ = _sin / (2 * q_factor) lowercase_ = _sin / 2 lowercase_ = 0 lowercase_ = -ba lowercase_ = 1 + alpha lowercase_ = -2 * _cos lowercase_ = 1 - alpha lowercase_ = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: float = 1 / sqrt(2 ) ): '''simple docstring''' lowercase_ = tau * frequency / samplerate lowercase_ = sin(__lowerCamelCase ) lowercase_ = cos(__lowerCamelCase ) lowercase_ = _sin / (2 * q_factor) lowercase_ = 1 - alpha lowercase_ = -2 * _cos lowercase_ = 1 + alpha lowercase_ = IIRFilter(2 ) filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] ) return filt def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: float , __lowerCamelCase: float = 1 / sqrt(2 ) , ): '''simple docstring''' lowercase_ = tau * frequency / samplerate lowercase_ = sin(__lowerCamelCase ) lowercase_ = cos(__lowerCamelCase ) lowercase_ = _sin / (2 * q_factor) lowercase_ = 10 ** (gain_db / 40) lowercase_ = 1 + alpha * big_a lowercase_ = -2 * _cos lowercase_ = 1 - alpha * big_a lowercase_ = 1 + alpha / big_a lowercase_ = -2 * _cos lowercase_ = 1 - alpha / big_a lowercase_ = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: float , __lowerCamelCase: float = 1 / sqrt(2 ) , ): '''simple docstring''' lowercase_ = tau * frequency / samplerate lowercase_ = sin(__lowerCamelCase ) lowercase_ = cos(__lowerCamelCase ) lowercase_ = _sin / (2 * q_factor) lowercase_ = 10 ** (gain_db / 40) lowercase_ = (big_a + 1) - (big_a - 1) * _cos lowercase_ = (big_a + 1) + (big_a - 1) * _cos lowercase_ = (big_a - 1) - (big_a + 1) * _cos lowercase_ = (big_a - 1) + (big_a + 1) * _cos lowercase_ = 2 * sqrt(__lowerCamelCase ) * alpha lowercase_ = big_a * (pmc + aaa) lowercase_ = 2 * big_a * mpc lowercase_ = big_a * (pmc - aaa) lowercase_ = ppmc + aaa lowercase_ = -2 * pmpc lowercase_ = ppmc - aaa lowercase_ = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: float , __lowerCamelCase: float = 1 / sqrt(2 ) , ): '''simple docstring''' lowercase_ = tau * frequency / samplerate lowercase_ = sin(__lowerCamelCase ) lowercase_ = cos(__lowerCamelCase ) lowercase_ = _sin / (2 * q_factor) lowercase_ = 10 ** (gain_db / 40) lowercase_ = (big_a + 1) - (big_a - 1) * _cos lowercase_ = (big_a + 1) + (big_a - 1) * _cos lowercase_ = (big_a - 1) - (big_a + 1) * _cos lowercase_ = (big_a - 1) + (big_a + 1) * _cos lowercase_ = 2 * sqrt(__lowerCamelCase ) * alpha lowercase_ = big_a * (ppmc + aaa) lowercase_ = -2 * big_a * pmpc lowercase_ = big_a * (ppmc - aaa) lowercase_ = pmc + aaa lowercase_ = 2 * mpc lowercase_ = pmc - aaa lowercase_ = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt
362
import sys def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any] ): '''simple docstring''' lowercase_ = len(__lowerCamelCase ) lowercase_ = [[0 for x in range(__lowerCamelCase )] for x in range(__lowerCamelCase )] lowercase_ = [[0 for x in range(__lowerCamelCase )] for x in range(__lowerCamelCase )] for chain_length in range(2 , __lowerCamelCase ): for a in range(1 , n - chain_length + 1 ): lowercase_ = a + chain_length - 1 lowercase_ = sys.maxsize for c in range(__lowerCamelCase , __lowerCamelCase ): lowercase_ = ( matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b] ) if cost < matrix[a][b]: lowercase_ = cost lowercase_ = c return matrix, sol def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict ): '''simple docstring''' if i == j: print("A" + str(__lowerCamelCase ) , end=" " ) else: print("(" , end=" " ) print_optiomal_solution(__lowerCamelCase , __lowerCamelCase , optimal_solution[i][j] ) print_optiomal_solution(__lowerCamelCase , optimal_solution[i][j] + 1 , __lowerCamelCase ) print(")" , end=" " ) def SCREAMING_SNAKE_CASE_ ( ): '''simple docstring''' lowercase_ = [30, 35, 15, 5, 10, 20, 25] lowercase_ = len(__lowerCamelCase ) # Size of matrix created from above array will be # 30*35 35*15 15*5 5*10 10*20 20*25 lowercase_ , lowercase_ = matrix_chain_order(__lowerCamelCase ) print("No. of Operation required: " + str(matrix[1][n - 1] ) ) print_optiomal_solution(__lowerCamelCase , 1 , n - 1 ) if __name__ == "__main__": main()
297
0
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( UniSpeechConfig, UniSpeechForCTC, UniSpeechForPreTraining, WavaVecaFeatureExtractor, WavaVecaPhonemeCTCTokenizer, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __A = logging.get_logger(__name__) __A = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "ctc_proj", "mask_emb": "masked_spec_embed", } __A = [ "ctc_proj", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def lowerCAmelCase_ ( __a , __a , __a , __a , __a , __a ) -> Optional[Any]: """simple docstring""" for attribute in key.split("." ): if is_finetuned: if attribute in ["quantizer", "project_q", "project_hid"]: # those layers are only relevant for pretraining and should be dropped return if attribute == "ctc_proj": # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models lowerCamelCase__: Optional[int] ="lm_head" lowerCamelCase__: Dict =getattr(__a , __a ) if weight_type is not None: lowerCamelCase__: str =getattr(__a , __a ).shape else: lowerCamelCase__: int =hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": lowerCamelCase__: Dict =value elif weight_type == "weight_g": lowerCamelCase__: Optional[Any] =value elif weight_type == "weight_v": lowerCamelCase__: int =value elif weight_type == "bias": lowerCamelCase__: List[str] =value else: lowerCamelCase__: Union[str, Any] =value logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def lowerCAmelCase_ ( __a , __a , __a ) -> Any: """simple docstring""" lowerCamelCase__: List[Any] =[] lowerCamelCase__: List[str] =fairseq_model.state_dict() lowerCamelCase__: Optional[int] =hf_model.unispeech.feature_extractor for name, value in fairseq_dict.items(): lowerCamelCase__: int =False if "conv_layers" in name: load_conv_layer( __a , __a , __a , __a , hf_model.config.feat_extract_norm == "group" , ) lowerCamelCase__: str =True else: for key, mapped_key in MAPPING.items(): lowerCamelCase__: List[str] ="unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: lowerCamelCase__: Optional[Any] =True if "*" in mapped_key: lowerCamelCase__: Optional[Any] =name.split(__a )[0].split("." )[-2] lowerCamelCase__: List[str] =mapped_key.replace("*" , __a ) if "weight_g" in name: lowerCamelCase__: List[str] ="weight_g" elif "weight_v" in name: lowerCamelCase__: Union[str, Any] ="weight_v" elif "bias" in name: lowerCamelCase__: Dict ="bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj lowerCamelCase__: Tuple ="weight" else: lowerCamelCase__: List[Any] =None set_recursively(__a , __a , __a , __a , __a , __a ) continue if not is_used: unused_weights.append(__a ) logger.warning(F"""Unused weights: {unused_weights}""" ) def lowerCAmelCase_ ( __a , __a , __a , __a , __a ) -> Union[str, Any]: """simple docstring""" lowerCamelCase__: Tuple =full_name.split("conv_layers." )[-1] lowerCamelCase__: List[str] =name.split("." ) lowerCamelCase__: str =int(items[0] ) lowerCamelCase__: Union[str, Any] =int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) lowerCamelCase__: List[str] =value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) lowerCamelCase__: Dict =value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." ) lowerCamelCase__: List[Any] =value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) lowerCamelCase__: List[str] =value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(__a ) @torch.no_grad() def lowerCAmelCase_ ( __a , __a , __a=None , __a=None , __a=True ) -> int: """simple docstring""" if config_path is not None: lowerCamelCase__: str =UniSpeechConfig.from_pretrained(__a ) else: lowerCamelCase__: List[Any] =UniSpeechConfig() if is_finetuned: if dict_path: lowerCamelCase__: str =Dictionary.load_from_json(__a ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq lowerCamelCase__: Any =target_dict.pad_index lowerCamelCase__: int =target_dict.bos_index lowerCamelCase__: Any =target_dict.eos_index lowerCamelCase__: Dict =len(target_dict.symbols ) lowerCamelCase__: Optional[int] =os.path.join(__a , "vocab.json" ) if not os.path.isdir(__a ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(__a ) ) return os.makedirs(__a , exist_ok=__a ) lowerCamelCase__: Optional[Any] =target_dict.indices # fairseq has the <pad> and <s> switched lowerCamelCase__: Optional[Any] =42 lowerCamelCase__: List[Any] =43 with open(__a , "w" , encoding="utf-8" ) as vocab_handle: json.dump(__a , __a ) lowerCamelCase__: List[str] =WavaVecaPhonemeCTCTokenizer( __a , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=__a , ) lowerCamelCase__: Dict =True if config.feat_extract_norm == "layer" else False lowerCamelCase__: Tuple =WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__a , return_attention_mask=__a , ) lowerCamelCase__: List[Any] =WavaVecaProcessor(feature_extractor=__a , tokenizer=__a ) processor.save_pretrained(__a ) lowerCamelCase__: int =UniSpeechForCTC(__a ) else: lowerCamelCase__: int =UniSpeechForPreTraining(__a ) if is_finetuned: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[int] =fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] ), "w2v_path": checkpoint_path} ) else: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Tuple =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) lowerCamelCase__: List[str] =model[0].eval() recursively_load_weights(__a , __a , __a ) hf_unispeech.save_pretrained(__a ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) __A = parser.parse_args() convert_unispeech_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
10
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError import requests def __snake_case ( _UpperCAmelCase = "isbn/0140328726" ): __a = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes if new_olid.count('''/''' ) != 1: __a = f'{olid} is not a valid Open Library olid' raise ValueError(_UpperCAmelCase ) return requests.get(f'https://openlibrary.org/{new_olid}.json' ).json() def __snake_case ( _UpperCAmelCase ): __a = { '''title''': '''Title''', '''publish_date''': '''Publish date''', '''authors''': '''Authors''', '''number_of_pages''': '''Number of pages:''', '''first_sentence''': '''First sentence''', '''isbn_10''': '''ISBN (10)''', '''isbn_13''': '''ISBN (13)''', } __a = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()} __a = [ get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors'''] ] __a = data['''First sentence''']['''value'''] for key, value in data.items(): if isinstance(_UpperCAmelCase , _UpperCAmelCase ): __a = ''', '''.join(_UpperCAmelCase ) return data if __name__ == "__main__": import doctest doctest.testmod() while True: __snake_case :List[Any] = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip() if isbn.lower() in ("", "q", "quit", "exit", "stop"): break if len(isbn) not in (10, 13) or not isbn.isdigit(): print(f'Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.') continue print(f'\nSearching Open Library for ISBN: {isbn}...\n') try: __snake_case :Optional[Any] = summarize_book(get_openlibrary_data(f'isbn/{isbn}')) print('''\n'''.join(f'{key}: {value}' for key, value in book_summary.items())) except JSONDecodeError: # Workaround for requests.exceptions.RequestException: print(f'Sorry, there are no results for ISBN: {isbn}.')
49
0
"""simple docstring""" import re import string import numpy as np import datasets __A : Union[str, Any] = ''' Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list. ''' __A : Optional[Any] = ''' Args: predictions: List of predicted texts. references: List of reference texts. regexes_to_ignore: List, defaults to None. Regex expressions of characters to ignore when calculating the exact matches. Note: these regexes are removed from the input data before the changes based on the options below (e.g. ignore_case, ignore_punctuation, ignore_numbers) are applied. ignore_case: Boolean, defaults to False. If true, turns everything to lowercase so that capitalization differences are ignored. ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before comparing predictions and references. ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before comparing predictions and references. Returns: exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive. Examples: >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["the cat", "theater", "YELLING", "agent007"] >>> preds = ["cat?", "theater", "yelling", "agent"] >>> results = exact_match.compute(references=refs, predictions=preds) >>> print(round(results["exact_match"], 1)) 25.0 >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["the cat", "theater", "YELLING", "agent007"] >>> preds = ["cat?", "theater", "yelling", "agent"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True) >>> print(round(results["exact_match"], 1)) 50.0 >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["the cat", "theater", "YELLING", "agent007"] >>> preds = ["cat?", "theater", "yelling", "agent"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True) >>> print(round(results["exact_match"], 1)) 75.0 >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["the cat", "theater", "YELLING", "agent007"] >>> preds = ["cat?", "theater", "yelling", "agent"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True) >>> print(round(results["exact_match"], 1)) 100.0 >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."] >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."] >>> results = exact_match.compute(references=refs, predictions=preds) >>> print(round(results["exact_match"], 1)) 33.3 ''' __A : Union[str, Any] = ''' ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase ( datasets.Metric ): def a_ ( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , reference_urls=[] , ) def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , ): if regexes_to_ignore is not None: for s in regexes_to_ignore: UpperCamelCase : Union[str, Any] = np.array([re.sub(SCREAMING_SNAKE_CASE_ , """""" , SCREAMING_SNAKE_CASE_ ) for x in predictions] ) UpperCamelCase : List[str] = np.array([re.sub(SCREAMING_SNAKE_CASE_ , """""" , SCREAMING_SNAKE_CASE_ ) for x in references] ) else: UpperCamelCase : List[Any] = np.asarray(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = np.asarray(SCREAMING_SNAKE_CASE_ ) if ignore_case: UpperCamelCase : Optional[int] = np.char.lower(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Any = np.char.lower(SCREAMING_SNAKE_CASE_ ) if ignore_punctuation: UpperCamelCase : int = string.punctuation.maketrans("""""" , """""" , string.punctuation ) UpperCamelCase : List[str] = np.char.translate(SCREAMING_SNAKE_CASE_ , table=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = np.char.translate(SCREAMING_SNAKE_CASE_ , table=SCREAMING_SNAKE_CASE_ ) if ignore_numbers: UpperCamelCase : Any = string.digits.maketrans("""""" , """""" , string.digits ) UpperCamelCase : Dict = np.char.translate(SCREAMING_SNAKE_CASE_ , table=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = np.char.translate(SCREAMING_SNAKE_CASE_ , table=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[int] = predictions == references return {"exact_match": np.mean(SCREAMING_SNAKE_CASE_ ) * 100}
27
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.local_sgd import LocalSGD ######################################################################## # This is a fully working simple example to use Accelerate # with LocalSGD, which is a method to synchronize model # parameters every K batches. It is different, but complementary # to gradient accumulation. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __A : Optional[Any] = 16 __A : str = 32 def A_ ( snake_case_ : Accelerator ,snake_case_ : int = 1_6 ): '''simple docstring''' UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("""bert-base-cased""" ) UpperCamelCase : Optional[int] = load_dataset("""glue""" ,"""mrpc""" ) def tokenize_function(snake_case_ : List[Any] ): # max_length=None => use the model max length (it's actually the default) UpperCamelCase : Union[str, Any] = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=snake_case_ ,max_length=snake_case_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): UpperCamelCase : Optional[Any] = datasets.map( snake_case_ ,batched=snake_case_ ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCamelCase : str = tokenized_datasets.rename_column("""label""" ,"""labels""" ) def collate_fn(snake_case_ : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. UpperCamelCase : Union[str, Any] = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": UpperCamelCase : Optional[Any] = 1_6 elif accelerator.mixed_precision != "no": UpperCamelCase : Any = 8 else: UpperCamelCase : Optional[Any] = None return tokenizer.pad( snake_case_ ,padding="""longest""" ,max_length=snake_case_ ,pad_to_multiple_of=snake_case_ ,return_tensors="""pt""" ,) # Instantiate dataloaders. UpperCamelCase : str = DataLoader( tokenized_datasets["""train"""] ,shuffle=snake_case_ ,collate_fn=snake_case_ ,batch_size=snake_case_ ) UpperCamelCase : Dict = DataLoader( tokenized_datasets["""validation"""] ,shuffle=snake_case_ ,collate_fn=snake_case_ ,batch_size=snake_case_ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1": from accelerate.test_utils.training import mocked_dataloaders __A : int = mocked_dataloaders # noqa: F811 def A_ ( snake_case_ : Tuple ,snake_case_ : Dict ): '''simple docstring''' # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""" ,snake_case_ ) == "1": UpperCamelCase : Union[str, Any] = 2 # New Code # UpperCamelCase : Dict = int(args.gradient_accumulation_steps ) UpperCamelCase : List[Any] = int(args.local_sgd_steps ) # Initialize accelerator UpperCamelCase : str = Accelerator( cpu=args.cpu ,mixed_precision=args.mixed_precision ,gradient_accumulation_steps=snake_case_ ) if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]: raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCamelCase : Union[str, Any] = config["""lr"""] UpperCamelCase : int = int(config["""num_epochs"""] ) UpperCamelCase : int = int(config["""seed"""] ) UpperCamelCase : List[Any] = int(config["""batch_size"""] ) UpperCamelCase : Optional[int] = evaluate.load("""glue""" ,"""mrpc""" ) set_seed(snake_case_ ) UpperCamelCase , UpperCamelCase : Dict = get_dataloaders(snake_case_ ,snake_case_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCamelCase : Optional[int] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" ,return_dict=snake_case_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). UpperCamelCase : Tuple = model.to(accelerator.device ) # Instantiate optimizer UpperCamelCase : List[Any] = AdamW(params=model.parameters() ,lr=snake_case_ ) # Instantiate scheduler UpperCamelCase : str = get_linear_schedule_with_warmup( optimizer=snake_case_ ,num_warmup_steps=1_0_0 ,num_training_steps=(len(snake_case_ ) * num_epochs) ,) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Any = accelerator.prepare( snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ) # Now we train the model for epoch in range(snake_case_ ): model.train() with LocalSGD( accelerator=snake_case_ ,model=snake_case_ ,local_sgd_steps=snake_case_ ,enabled=local_sgd_steps is not None ) as local_sgd: for step, batch in enumerate(snake_case_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. with accelerator.accumulate(snake_case_ ): UpperCamelCase : Optional[Any] = model(**snake_case_ ) UpperCamelCase : Optional[int] = output.loss accelerator.backward(snake_case_ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # LocalSGD-specific line local_sgd.step() model.eval() for step, batch in enumerate(snake_case_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): UpperCamelCase : Any = model(**snake_case_ ) UpperCamelCase : Tuple = outputs.logits.argmax(dim=-1 ) UpperCamelCase , UpperCamelCase : int = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=snake_case_ ,references=snake_case_ ,) UpperCamelCase : str = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'epoch {epoch}:' ,snake_case_ ) def A_ ( ): '''simple docstring''' UpperCamelCase : str = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" ,type=snake_case_ ,default=snake_case_ ,choices=["""no""", """fp16""", """bf16""", """fp8"""] ,help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" ,) # New Code # parser.add_argument( """--gradient_accumulation_steps""" ,type=snake_case_ ,default=1 ,help="""The number of minibatches to be ran before gradients are accumulated.""" ,) parser.add_argument( """--local_sgd_steps""" ,type=snake_case_ ,default=8 ,help="""Number of local SGD steps or None to disable local SGD""" ) parser.add_argument("""--cpu""" ,action="""store_true""" ,help="""If passed, will train on the CPU.""" ) UpperCamelCase : Dict = parser.parse_args() UpperCamelCase : List[Any] = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6} training_function(snake_case_ ,snake_case_ ) if __name__ == "__main__": main()
27
1