code
stringlengths
86
54.5k
code_codestyle
int64
0
371
style_context
stringlengths
87
49.2k
style_context_codestyle
int64
0
349
label
int64
0
1
import string

import numpy


def greatest_common_divisor(a: int, b: int) -> int:
    """Return gcd(a, b) via Euclid's algorithm.

    Fix: the original recursed on an undefined name instead of `a`.
    """
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    """Hill cipher over the 36-character alphabet A-Z0-9.

    The key is a square integer matrix; text is processed in blocks of
    size equal to the key's order.  The determinant of the key must be
    coprime with 36 for the cipher to be invertible.
    """

    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)
    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key) -> None:
        """Store the key mod 36, validate it, and record the block size."""
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        """Map a character of the alphabet to its numeric index."""
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        """Map a numeric index (possibly float) back to its character."""
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        """Raise ValueError unless det(key) mod 36 is coprime with 36."""
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        """Upper-case, drop foreign characters, and pad to a block multiple."""
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)

    def encrypt(self, text: str) -> str:
        """Encrypt `text` block-by-block with the key matrix."""
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted
            )
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self):
        """Return the modular inverse of the key matrix (mod 36)."""
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        # brute-force search for the modular multiplicative inverse of det
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        """Decrypt `text` block-by-block with the inverse key matrix."""
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted
            )
            decrypted += decrypted_batch

        return decrypted


def main() -> None:
    """Interactive driver: read a key matrix, then encrypt or decrypt."""
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
142
import unittest

from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers.models.esm.modeling_esmfold import EsmForProteinFolding


class EsmFoldModelTester:
    """Builds small configs/inputs for ESMFold model tests.

    NOTE(review): names reconstructed from an obfuscated dump — in the
    original every method was named `snake_case_`, so later definitions
    silently shadowed earlier ones; restored to distinct names.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random ids/mask/labels plus a small folding config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        # vocab_size is fixed at 33 (ESMFold's protein alphabet), not self.vocab_size
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False},
        )
        return config

    def create_and_check_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        # run with and without attention mask; keep the last result
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict


@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-model-test suite for ESMFold; most generic tests are skipped
    because ESMFold's output format differs from standard models."""

    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip('Does not support attention outputs')
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip('Esm does not support embedding resizing')
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip('Esm does not support embedding resizing')
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip('ESMFold does not support passing input embeds!')
    def test_inputs_embeds(self):
        pass

    @unittest.skip('ESMFold does not support head pruning.')
    def test_head_pruning(self):
        pass

    @unittest.skip('ESMFold does not support head pruning.')
    def test_head_pruning_integration(self):
        pass

    @unittest.skip('ESMFold does not support head pruning.')
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip('ESMFold does not support head pruning.')
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip('ESMFold does not support head pruning.')
    def test_headmasking(self):
        pass

    @unittest.skip('ESMFold does not output hidden states in the normal way.')
    def test_hidden_states_output(self):
        pass

    @unittest.skip('ESMfold does not output hidden states in the normal way.')
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip('ESMFold only has one output format.')
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality')
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip('ESMFold does not support input chunking.')
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.')
    def test_initialization(self):
        pass

    @unittest.skip('ESMFold doesn\'t support torchscript compilation.')
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip('ESMFold doesn\'t support torchscript compilation.')
    def test_torchscript_output_hidden_state(self):
        pass

    @unittest.skip('ESMFold doesn\'t support torchscript compilation.')
    def test_torchscript_simple(self):
        pass

    @unittest.skip('ESMFold doesn\'t support data parallel.')
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass


@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1').float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)['positions']
        # original dump had the mangled `torch.floataa` — restored to float32
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
214
0
"""simple docstring""" from collections.abc import Sequence def _lowercase ( __snake_case ,__snake_case = False ) -> float: if not arr: return 0 __lowerCAmelCase : Any = 0 if allow_empty_subarrays else float("-inf" ) __lowerCAmelCase : Union[str, Any] = 0.0 for num in arr: __lowerCAmelCase : int = max(0 if allow_empty_subarrays else num ,curr_sum + num ) __lowerCAmelCase : Optional[Any] = max(__snake_case ,__snake_case ) return max_sum if __name__ == "__main__": from doctest import testmod testmod() __snake_case : int = [-2, 1, -3, 4, -1, 2, 1, -5, 4] print(F"""{max_subarray_sum(nums) = }""")
365
"""simple docstring""" def _lowercase ( __snake_case ) -> int: if not isinstance(__snake_case ,__snake_case ): raise ValueError("Input must be an integer" ) if input_num <= 0: raise ValueError("Input must be positive" ) return sum( divisor for divisor in range(1 ,input_num // 2 + 1 ) if input_num % divisor == 0 ) if __name__ == "__main__": import doctest doctest.testmod()
58
0
"""Convert timm Swin checkpoints to the Hugging Face format."""
import argparse
import json

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification


def get_swin_config(swin_name):
    """Build a SwinConfig from a timm model name such as
    ``swin_tiny_patch4_window7_224``.

    Fix: the dump defined every function under the same obfuscated name
    (each def shadowed the previous) while calling the real names below,
    and keyed the id2label dict on the dict itself instead of ``k``.
    """
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21_841
    else:
        num_classes = 1_000

    # NOTE(review): label-map loading reconstructed as unconditional per the
    # flattened dump's statement order — confirm against the original script.
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config


def rename_key(name):
    """Map a timm state-dict key to its Hugging Face Swin equivalent."""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name


def convert_state_dict(orig_state_dict, model):
    """Rename timm keys and split fused qkv weights into q/k/v entries.

    NOTE(review): the destination key strings were lost in the dump and are
    reconstructed from the standard HF Swin layout — verify against the
    upstream conversion script.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            prefix = f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    """Load the timm model, port its weights, sanity-check outputs, save."""
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    # both implementations must agree before we persist the converted weights
    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swin_name",
        default="swin_tiny_patch4_window7_224",
        type=str,
        help="Name of the Swin timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
35
'''simple docstring''' from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCAmelCase_ : """simple docstring""" def __init__( self : int , snake_case_ : Tuple , snake_case_ : List[str]=3 , snake_case_ : Tuple=32 , snake_case_ : List[Any]=3 , snake_case_ : List[str]=10 , snake_case_ : List[str]=[10, 20, 30, 40] , snake_case_ : Tuple=[1, 1, 2, 1] , snake_case_ : Tuple=True , snake_case_ : str=True , snake_case_ : int="relu" , snake_case_ : List[Any]=3 , snake_case_ : str=None , ): snake_case__ : List[Any] = parent snake_case__ : List[Any] = batch_size snake_case__ : int = image_size snake_case__ : List[Any] = num_channels snake_case__ : Optional[Any] = embeddings_size snake_case__ : Optional[int] = hidden_sizes snake_case__ : Tuple = depths snake_case__ : Any = is_training snake_case__ : Optional[int] = use_labels snake_case__ : Optional[int] = hidden_act snake_case__ : Optional[int] = num_labels snake_case__ : int = scope snake_case__ : Tuple = len(snake_case_ ) def lowerCamelCase ( self : Any ): snake_case__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case__ : Union[str, Any] = None if self.use_labels: snake_case__ : Optional[Any] = ids_tensor([self.batch_size] , 
self.num_labels ) snake_case__ : List[str] = self.get_config() return config, pixel_values, labels def lowerCamelCase ( self : int ): return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def lowerCamelCase ( self : Tuple , snake_case_ : Tuple , snake_case_ : List[Any] , snake_case_ : Optional[int] ): snake_case__ : Optional[Any] = TFResNetModel(config=snake_case_ ) snake_case__ : int = model(snake_case_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCamelCase ( self : List[str] , snake_case_ : List[str] , snake_case_ : str , snake_case_ : Union[str, Any] ): snake_case__ : str = self.num_labels snake_case__ : Optional[int] = TFResNetForImageClassification(snake_case_ ) snake_case__ : Tuple = model(snake_case_ , labels=snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase ( self : Tuple ): snake_case__ : List[Any] = self.prepare_config_and_inputs() snake_case__ , snake_case__ , snake_case__ : str = config_and_inputs snake_case__ : int = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class UpperCAmelCase_ ( _a , _a , unittest.TestCase ): """simple docstring""" lowercase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () lowercase = ( {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification} if is_tf_available() else {} ) lowercase = False lowercase = False lowercase = False lowercase = False lowercase = False def lowerCamelCase ( self : Optional[int] ): snake_case__ : Tuple = TFResNetModelTester(self ) snake_case__ : List[str] = ConfigTester(self , config_class=snake_case_ , 
has_text_modality=snake_case_ ) def lowerCamelCase ( self : Dict ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase ( self : str ): return @unittest.skip(reason="""ResNet does not use inputs_embeds""" ) def lowerCamelCase ( self : int ): pass @unittest.skip(reason="""ResNet does not support input and output embeddings""" ) def lowerCamelCase ( self : List[Any] ): pass def lowerCamelCase ( self : List[Any] ): snake_case__ , snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : Dict = model_class(snake_case_ ) snake_case__ : Optional[Any] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case__ : Union[str, Any] = [*signature.parameters.keys()] snake_case__ : Optional[int] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , snake_case_ ) def lowerCamelCase ( self : Union[str, Any] ): snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case_ ) def lowerCamelCase ( self : List[str] ): def check_hidden_states_output(snake_case_ : Any , snake_case_ : Any , snake_case_ : List[str] ): snake_case__ : List[Any] = model_class(snake_case_ ) snake_case__ : Dict = model(**self._prepare_for_class(snake_case_ , snake_case_ ) ) snake_case__ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states snake_case__ : List[Any] = self.model_tester.num_stages self.assertEqual(len(snake_case_ ) , expected_num_stages + 1 ) # ResNet's feature maps are of 
shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) snake_case__ , snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ : List[Any] = ["""basic""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: snake_case__ : Dict = layer_type snake_case__ : Optional[int] = True check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case__ : List[Any] = True check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ) def lowerCamelCase ( self : Optional[Any] ): snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case_ ) @slow def lowerCamelCase ( self : Optional[Any] ): for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case__ : str = TFResNetModel.from_pretrained(snake_case_ ) self.assertIsNotNone(snake_case_ ) def __snake_case( ) -> Optional[int]: snake_case__ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" @cached_property def lowerCamelCase ( self : List[Any] ): return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowerCamelCase ( self : Optional[int] ): snake_case__ : List[str] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) snake_case__ : List[Any] = self.default_image_processor snake_case__ : List[Any] = prepare_img() snake_case__ : List[str] = image_processor(images=snake_case_ , return_tensors="""tf""" ) # forward pass snake_case__ : Optional[Any] = 
model(**snake_case_ ) # verify the logits snake_case__ : Union[str, Any] = tf.TensorShape((1, 1_000) ) self.assertEqual(outputs.logits.shape , snake_case_ ) snake_case__ : List[str] = tf.constant([-11.1069, -9.7877, -8.3777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , snake_case_ , atol=1E-4 ) )
35
1
import logging

from transformers.configuration_utils import PretrainedConfig


logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """Configuration for a Masked BERT model.

    Extends the standard BERT hyper-parameters with pruning options
    (``pruning_method``, ``mask_init``, ``mask_scale``).

    Fix: the dump declared the base class as the undefined name
    ``lowerCamelCase__``; restored to ``PretrainedConfig``.
    """

    model_type = """masked_bert"""

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # pruning configuration: strategy plus mask-initialisation scheme
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
370
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int) -> None:
    """Estimate pi by sampling points uniformly in the square [-1, 1]^2
    and measuring the fraction that falls inside the unit circle.

    Fix: the dump defined all four functions under the same name ``a__``
    (later defs shadowed earlier ones) while the bodies called the real
    names below, which would have raised NameError.
    """

    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f'''The estimated value of pi is {pi_estimate}''')
    print(f'''The numpy value of pi is {pi}''')
    print(f'''The total error is {abs(pi - pi_estimate)}''')


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte-Carlo estimate of the integral of ``function_to_integrate``
    over [min_value, max_value]: mean sampled value times interval width."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    """Compare the estimator against the exact area under y = x."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print('''******************''')
    print(f'''Estimating area under y=x where x varies from {min_value} to {max_value}''')
    print(f'''Estimated value is {estimated_value}''')
    print(f'''Expected value is {expected_value}''')
    print(f'''Total error is {abs(estimated_value - expected_value)}''')
    print('''******************''')


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under a quarter circle of radius 2."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print('''******************''')
    print('''Estimating pi using area_under_curve_estimator''')
    print(f'''Estimated value is {estimated_value}''')
    print(f'''Expected value is {pi}''')
    print(f'''Total error is {abs(estimated_value - pi)}''')
    print('''******************''')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
99
0
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class A_ ( unittest.TestCase ): def __init__( self : int ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=7 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=3 ,SCREAMING_SNAKE_CASE__ : str=1_8 ,SCREAMING_SNAKE_CASE__ : Optional[int]=3_0 ,SCREAMING_SNAKE_CASE__ : List[str]=4_0_0 ,SCREAMING_SNAKE_CASE__ : Any=True ,SCREAMING_SNAKE_CASE__ : int=None ,SCREAMING_SNAKE_CASE__ : Tuple=True ,SCREAMING_SNAKE_CASE__ : str=None ,SCREAMING_SNAKE_CASE__ : Optional[Any]=True ,SCREAMING_SNAKE_CASE__ : List[str]=[0.5, 0.5, 0.5] ,SCREAMING_SNAKE_CASE__ : List[Any]=[0.5, 0.5, 0.5] ,): __lowerCamelCase : Any = size if size is not None else {'shortest_edge': 1_8} __lowerCamelCase : int = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8} __lowerCamelCase : Optional[Any] = parent __lowerCamelCase : Optional[Any] = batch_size __lowerCamelCase : List[Any] = num_channels __lowerCamelCase : List[Any] = image_size __lowerCamelCase : List[Any] = min_resolution __lowerCamelCase : Any = max_resolution __lowerCamelCase : Optional[int] = do_resize __lowerCamelCase : Optional[int] = size __lowerCamelCase : List[Any] = do_center_crop __lowerCamelCase : Tuple = crop_size __lowerCamelCase : List[str] = do_normalize __lowerCamelCase : Optional[Any] = image_mean __lowerCamelCase : int = image_std def lowerCAmelCase ( self : List[str]): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "do_center_crop": self.do_center_crop, "size": self.size, "crop_size": self.crop_size, } @require_torch 
@require_vision class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase : List[Any] = LevitImageProcessor if is_vision_available() else None def lowerCAmelCase ( self : int): __lowerCamelCase : Optional[int] = LevitImageProcessingTester(self) @property def lowerCAmelCase ( self : int): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase ( self : List[str]): __lowerCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'image_mean')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'image_std')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_normalize')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_resize')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_center_crop')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'size')) def lowerCAmelCase ( self : Optional[Any]): __lowerCamelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size ,{'shortest_edge': 1_8}) self.assertEqual(image_processor.crop_size ,{'height': 1_8, 'width': 1_8}) __lowerCamelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ,size=4_2 ,crop_size=8_4) self.assertEqual(image_processor.size ,{'shortest_edge': 4_2}) self.assertEqual(image_processor.crop_size ,{'height': 8_4, 'width': 8_4}) def lowerCAmelCase ( self : Any): pass def lowerCAmelCase ( self : List[str]): # Initialize image_processing __lowerCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict) # create random PIL images __lowerCamelCase : Any = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,Image.Image) # Test not batched input __lowerCamelCase : Union[str, Any] = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values self.assertEqual( 
encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,) # Test batched __lowerCamelCase : Optional[int] = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,) def lowerCAmelCase ( self : Dict): # Initialize image_processing __lowerCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors __lowerCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__ ,numpify=SCREAMING_SNAKE_CASE__) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,np.ndarray) # Test not batched input __lowerCamelCase : Optional[int] = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,) # Test batched __lowerCamelCase : Optional[int] = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,) def lowerCAmelCase ( self : str): # Initialize image_processing __lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __lowerCamelCase : str = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__ ,torchify=SCREAMING_SNAKE_CASE__) for image in image_inputs: 
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,torch.Tensor) # Test not batched input __lowerCamelCase : List[str] = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,) # Test batched __lowerCamelCase : str = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,)
73
import numpy as np from scipy.spatial.distance import cdist from sklearn.metrics import fa_score import datasets a ="""\ @inproceedings{kakwani2020indicnlpsuite, title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}}, author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar}, year={2020}, booktitle={Findings of EMNLP}, } """ a ="""\ IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te. """ a =""" Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset. Args: predictions: list of predictions to score (as int64), except for 'cvit-mkb-clsr' where each prediction is a vector (of float32). references: list of ground truth labels corresponding to the predictions (as int64), except for 'cvit-mkb-clsr' where each reference is a vector (of float32). 
Returns: depending on the IndicGLUE subset, one or several of: \"accuracy\": Accuracy \"f1\": F1 score \"precision\": Precision@10 Examples: >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"] >>> references = [0, 1] >>> predictions = [0, 1] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0} >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner') >>> references = [0, 1] >>> predictions = [0, 1] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0, 'f1': 1.0} >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr') >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]] >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'precision@10': 1.0} """ def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List[str]: return float((preds == labels).mean() ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List[str]: __lowerCamelCase : Optional[Any] = simple_accuracy(lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase : Tuple = float(fa_score(y_true=lowerCamelCase__ , y_pred=lowerCamelCase__ ) ) return { "accuracy": acc, "f1": fa, } def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]: __lowerCamelCase : Any = np.array(lowerCamelCase__ ) __lowerCamelCase : List[Any] = np.array(lowerCamelCase__ ) __lowerCamelCase : Any = en_sentvecs.shape[0] # mean centering __lowerCamelCase : Union[str, Any] = en_sentvecs - np.mean(lowerCamelCase__ , axis=0 ) __lowerCamelCase : Dict = in_sentvecs - np.mean(lowerCamelCase__ , axis=0 ) __lowerCamelCase : Optional[int] = cdist(lowerCamelCase__ , 
lowerCamelCase__ , 'cosine' ) __lowerCamelCase : Optional[Any] = np.array(range(lowerCamelCase__ ) ) __lowerCamelCase : Dict = sim.argsort(axis=1 )[:, :1_0] __lowerCamelCase : Optional[int] = np.any(preds == actual[:, None] , axis=1 ) return float(matches.mean() ) @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A_ ( datasets.Metric ): def lowerCAmelCase ( self : Optional[Any]): if self.config_name not in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", "wiki-ner", ]: raise KeyError( 'You should supply a configuration name selected in ' '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", ' '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", ' '"wiki-ner"]') return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { 'predictions': datasets.Value('int64') if self.config_name != 'cvit-mkb-clsr' else datasets.Sequence(datasets.Value('float32')), 'references': datasets.Value('int64') if self.config_name != 'cvit-mkb-clsr' else datasets.Sequence(datasets.Value('float32')), }) ,codebase_urls=[] ,reference_urls=[] ,format='numpy' if self.config_name != 'cvit-mkb-clsr' else None ,) def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Optional[Any]): if self.config_name == "cvit-mkb-clsr": return {"precision@10": precision_at_aa(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)} elif self.config_name in ["wiki-ner"]: return acc_and_fa(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) elif self.config_name in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md", ]: return {"accuracy": simple_accuracy(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)} else: raise KeyError( 'You should supply a configuration name selected in ' '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", ' 
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", ' '"wiki-ner"]')
73
1
'''simple docstring''' from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType lowerCAmelCase_ : int = logging.get_logger(__name__) lowerCAmelCase_ : str = { 'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json', 'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json', 'microsoft/deberta-v2-xlarge-mnli': ( 'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json' ), 'microsoft/deberta-v2-xxlarge-mnli': ( 'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json' ), } class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a ='deberta-v2' def __init__( self : Dict , __a : List[str]=12_81_00 , __a : int=15_36 , __a : Tuple=24 , __a : Dict=24 , __a : Any=61_44 , __a : Optional[Any]="gelu" , __a : int=0.1 , __a : Any=0.1 , __a : Optional[Any]=5_12 , __a : int=0 , __a : Tuple=0.02 , __a : Tuple=1e-7 , __a : List[str]=False , __a : Tuple=-1 , __a : Dict=0 , __a : Tuple=True , __a : Dict=None , __a : int=0 , __a : Tuple="gelu" , **__a : Optional[Any] , ): super().__init__(**__a ) _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = intermediate_size _a = hidden_act _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = max_position_embeddings _a = type_vocab_size _a = initializer_range _a = relative_attention _a = max_relative_positions _a = pad_token_id _a = position_biased_input # Backwards compatibility if type(__a ) == str: _a = [x.strip() for x in pos_att_type.lower().split("|" )] _a = pos_att_type _a = vocab_size _a = layer_norm_eps _a = kwargs.get("pooler_hidden_size" , __a ) _a = pooler_dropout _a = pooler_hidden_act 
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" @property def UpperCamelCase__ ( self : Tuple ): if self.task == "multiple-choice": _a = {0: "batch", 1: "choice", 2: "sequence"} else: _a = {0: "batch", 1: "sequence"} if self._config.type_vocab_size > 0: return OrderedDict( [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)] ) else: return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)] ) @property def UpperCamelCase__ ( self : List[Any] ): return 12 def UpperCamelCase__ ( self : Tuple , __a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , __a : int = -1 , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional["TensorType"] = None , __a : int = 3 , __a : int = 40 , __a : int = 40 , __a : "PreTrainedTokenizerBase" = None , ): _a = super().generate_dummy_inputs(preprocessor=__a , framework=__a ) if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs: del dummy_inputs["token_type_ids"] return dummy_inputs
354
'''simple docstring''' import collections import json import math import os import re import time from fnmatch import fnmatch from typing import Dict import requests from slack_sdk import WebClient lowerCAmelCase_ : Tuple = WebClient(token=os.environ['CI_SLACK_BOT_TOKEN']) def _lowerCamelCase ( lowercase : List[Any] ) -> Optional[int]: _a = test_results.split(" " ) _a = 0 _a = 0 # When the output is short enough, the output is surrounded by = signs: "== OUTPUT ==" # When it is too long, those signs are not present. _a = expressions[-2] if "=" in expressions[-1] else expressions[-1] for i, expression in enumerate(lowercase ): if "failed" in expression: failed += int(expressions[i - 1] ) if "passed" in expression: success += int(expressions[i - 1] ) return failed, success, time_spent def _lowerCamelCase ( lowercase : str ) -> Optional[Any]: _a = {} _a = None _a = False for line in failures_short_lines.split("\n" ): if re.search(r"_ \[doctest\]" , lowercase ): _a = True _a = line.split(" " )[2] elif in_error and not line.split(" " )[0].isdigit(): _a = line _a = False return failures class __SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : Tuple , __a : str , __a : Dict ): _a = title _a = doc_test_results["time_spent"].split("," )[0] _a = doc_test_results["success"] _a = doc_test_results["failures"] _a = self.n_success + self.n_failures # Failures and success of the modeling tests _a = doc_test_results @property def UpperCamelCase__ ( self : int ): _a = [self._time_spent] _a = 0 for time in time_spent: _a = time.split(":" ) # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute. 
if len(__a ) == 1: _a = [0, 0, time_parts[0]] _a , _a , _a = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] ) total_secs += hours * 36_00 + minutes * 60 + seconds _a , _a , _a = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60 return f'{int(__a )}h{int(__a )}m{int(__a )}s' @property def UpperCamelCase__ ( self : Optional[Any] ): return {"type": "header", "text": {"type": "plain_text", "text": self.title}} @property def UpperCamelCase__ ( self : Optional[Any] ): return { "type": "section", "text": { "type": "plain_text", "text": f'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.', "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}', }, } @property def UpperCamelCase__ ( self : List[str] ): return { "type": "section", "text": { "type": "plain_text", "text": ( f'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in' f' {self.time}.' 
), "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}', }, } @property def UpperCamelCase__ ( self : str ): _a = 40 _a = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(__a , __a )} _a = "" for category, failures in category_failures.items(): if len(__a ) == 0: continue if report != "": report += "\n\n" report += f'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n" report += "`" report += "`\n`".join(__a ) report += "`" return { "type": "section", "text": { "type": "mrkdwn", "text": f'The following examples had failures:\n\n\n{report}\n', }, } @property def UpperCamelCase__ ( self : List[str] ): _a = [self.header] if self.n_failures > 0: blocks.append(self.failures ) if self.n_failures > 0: blocks.extend([self.category_failures] ) if self.n_failures == 0: blocks.append(self.no_failures ) return json.dumps(__a ) @staticmethod def UpperCamelCase__ ( ): _a = [ { "type": "section", "text": { "type": "plain_text", "text": "There was an issue running the tests.", }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}', }, } ] print("Sending the following payload" ) print(json.dumps({"blocks": json.loads(__a )} ) ) client.chat_postMessage( channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." , blocks=__a , ) def UpperCamelCase__ ( self : Tuple ): print("Sending the following payload" ) print(json.dumps({"blocks": json.loads(self.payload )} ) ) _a = f'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else "All tests passed." 
_a = client.chat_postMessage( channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , blocks=self.payload , text=__a , ) def UpperCamelCase__ ( self : Dict , __a : List[str] , __a : List[Any] , __a : Tuple , __a : int ): _a = "" for key, value in failures.items(): _a = value[:2_00] + " [Truncated]" if len(__a ) > 2_50 else value failures_text += f'*{key}*\n_{value}_\n\n' _a = job_name _a = {"type": "section", "text": {"type": "mrkdwn", "text": text}} if job_link is not None: _a = { "type": "button", "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True}, "url": job_link, } return [ {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}}, content, {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}}, ] def UpperCamelCase__ ( self : str ): if self.thread_ts is None: raise ValueError("Can only post reply if a post has been made." ) _a = self.doc_test_results.pop("job_link" ) self.doc_test_results.pop("failures" ) self.doc_test_results.pop("success" ) self.doc_test_results.pop("time_spent" ) _a = sorted(self.doc_test_results.items() , key=lambda __a : t[0] ) for job, job_result in sorted_dict: if len(job_result["failures"] ): _a = f'*Num failures* :{len(job_result["failed"] )} \n' _a = job_result["failures"] _a = self.get_reply_blocks(__a , __a , __a , text=__a ) print("Sending the following reply" ) print(json.dumps({"blocks": blocks} ) ) client.chat_postMessage( channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text=f'Results for {job}' , blocks=__a , thread_ts=self.thread_ts["ts"] , ) time.sleep(1 ) def _lowerCamelCase ( ) -> Any: _a = os.environ["GITHUB_RUN_ID"] _a = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100' _a = requests.get(lowercase ).json() _a = {} try: jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} ) _a = math.ceil((result["total_count"] - 100) / 100 ) for i in range(lowercase ): _a = requests.get(url + F'&page={i + 2}' 
).json() jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} ) return jobs except Exception as e: print("Unknown error, could not fetch links." , lowercase ) return {} def _lowerCamelCase ( lowercase : str ) -> Dict: _a = {} if os.path.exists(lowercase ): _a = os.listdir(lowercase ) for file in files: try: with open(os.path.join(lowercase , lowercase ) , encoding="utf-8" ) as f: _a = f.read() except UnicodeDecodeError as e: raise ValueError(F'Could not open {os.path.join(lowercase , lowercase )}.' ) from e return _artifact def _lowerCamelCase ( ) -> str: class __SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : Dict , __a : str ): _a = name _a = [] def __str__( self : List[str] ): return self.name def UpperCamelCase__ ( self : str , __a : str ): self.paths.append({"name": self.name, "path": path} ) _a = {} _a = filter(os.path.isdir , os.listdir() ) for directory in directories: _a = directory if artifact_name not in _available_artifacts: _a = Artifact(lowercase ) _available_artifacts[artifact_name].add_path(lowercase ) return _available_artifacts if __name__ == "__main__": lowerCAmelCase_ : List[Any] = get_job_links() lowerCAmelCase_ : Any = retrieve_available_artifacts() lowerCAmelCase_ : List[str] = collections.OrderedDict( [ ('*.py', 'API Examples'), ('*.md', 'MD Examples'), ] ) # This dict will contain all the information relative to each doc test category: # - failed: list of failed tests # - failures: dict in the format 'test': 'error_message' lowerCAmelCase_ : Optional[Any] = { v: { 'failed': [], 'failures': {}, } for v in docs.values() } # Link to the GitHub Action job lowerCAmelCase_ : int = github_actions_job_links.get('run_doctests') lowerCAmelCase_ : Union[str, Any] = available_artifacts['doc_tests_gpu_test_reports'].paths[0] lowerCAmelCase_ : List[str] = retrieve_artifact(artifact_path['name']) if "stats" in artifact: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = 
handle_test_results(artifact['stats']) lowerCAmelCase_ : List[str] = failed lowerCAmelCase_ : Optional[Any] = success lowerCAmelCase_ : Tuple = time_spent[1:-1] + ', ' lowerCAmelCase_ : List[Any] = extract_first_line_failure(artifact['failures_short']) for line in artifact["summary_short"].split('\n'): if re.search('FAILED', line): lowerCAmelCase_ : int = line.replace('FAILED ', '') lowerCAmelCase_ : Optional[int] = line.split()[0].replace('\n', '') if "::" in line: lowerCAmelCase_ , lowerCAmelCase_ : str = line.split('::') else: lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = line, line for file_regex in docs.keys(): if fnmatch(file_path, file_regex): lowerCAmelCase_ : Union[str, Any] = docs[file_regex] doc_test_results[category]["failed"].append(test) lowerCAmelCase_ : List[str] = all_failures[test] if test in all_failures else 'N/A' lowerCAmelCase_ : Optional[Any] = failure break lowerCAmelCase_ : Tuple = Message('🤗 Results of the doc tests.', doc_test_results) message.post() message.post_reply()
346
0
import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class A ( __A , __A ): '''simple docstring''' @register_to_config def __init__(self : Any , *, _UpperCAmelCase : int = 4 , _UpperCAmelCase : int = 768 , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , ) -> Any: """simple docstring""" super().__init__() lowercase__ = nn.Parameter(torch.zeros(lowercase_ ) ) # parameters for additional clip time embeddings lowercase__ = nn.Linear(lowercase_ , lowercase_ ) lowercase__ = nn.Linear(lowercase_ , lowercase_ ) # parameters for encoder hidden states lowercase__ = clip_extra_context_tokens lowercase__ = nn.Linear( lowercase_ , self.clip_extra_context_tokens * cross_attention_dim ) lowercase__ = nn.Linear(lowercase_ , lowercase_ ) lowercase__ = nn.LayerNorm(lowercase_ ) def lowerCamelCase__ (self : Optional[Any] , *, _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str ) -> Any: """simple docstring""" if do_classifier_free_guidance: # Add the classifier free guidance embeddings to the image embeddings lowercase__ = image_embeddings.shape[0] lowercase__ = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 ) lowercase__ = classifier_free_guidance_embeddings.expand( lowercase_ , -1 ) lowercase__ = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 ) # The image embeddings batch size and the text embeddings batch size are equal assert image_embeddings.shape[0] == prompt_embeds.shape[0] lowercase__ = prompt_embeds.shape[0] # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and # adding CLIP embeddings to the existing timestep embedding, ... lowercase__ = self.embedding_proj(lowercase_ ) lowercase__ = self.clip_image_embeddings_project_to_time_embeddings(lowercase_ ) lowercase__ = time_projected_image_embeddings + time_projected_prompt_embeds # ... 
and by projecting CLIP embeddings into four # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder" lowercase__ = self.clip_extra_context_tokens_proj(lowercase_ ) lowercase__ = clip_extra_context_tokens.reshape(lowercase_ , -1 , self.clip_extra_context_tokens ) lowercase__ = clip_extra_context_tokens.permute(0 , 2 , 1 ) lowercase__ = self.encoder_hidden_states_proj(lowercase_ ) lowercase__ = self.text_encoder_hidden_states_norm(lowercase_ ) lowercase__ = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 ) return text_encoder_hidden_states, additive_clip_time_embeddings
305
import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : List[Any] , _lowerCamelCase : Dict): # Initialise PyTorch model lowercase__ : List[str] = BertConfig.from_json_file(_lowerCamelCase) print(f'''Building PyTorch model from configuration: {config}''') lowercase__ : Optional[Any] = BertForPreTraining(_lowerCamelCase) # Load weights from tf checkpoint load_tf_weights_in_bert(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase) # Save pytorch-model print(f'''Save PyTorch model to {pytorch_dump_path}''') torch.save(model.state_dict() , _lowerCamelCase) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--bert_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained BERT model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) UpperCamelCase = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
87
0
from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__) @add_end_docstrings(_a ) class _lowerCamelCase( _a ): def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Optional[Any]: """simple docstring""" super().__init__(*lowerCamelCase, **lowerCamelCase) requires_backends(self, 'vision') self.check_model_type(lowerCamelCase) def __call__( self, lowerCamelCase, **lowerCamelCase) -> Union[str, Any]: """simple docstring""" return super().__call__(lowerCamelCase, **lowerCamelCase) def UpperCamelCase ( self, **lowerCamelCase) -> List[Any]: """simple docstring""" return {}, {}, {} def UpperCamelCase ( self, lowerCamelCase) -> List[Any]: """simple docstring""" _lowercase : Union[str, Any] = load_image(lowerCamelCase) _lowercase : Optional[Any] = image.size _lowercase : Any = self.image_processor(images=lowerCamelCase, return_tensors=self.framework) return model_inputs def UpperCamelCase ( self, lowerCamelCase) -> List[str]: """simple docstring""" _lowercase : List[Any] = self.model(**lowerCamelCase) return model_outputs def UpperCamelCase ( self, lowerCamelCase) -> str: """simple docstring""" _lowercase : str = model_outputs.predicted_depth _lowercase : int = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode='bicubic', align_corners=lowerCamelCase) _lowercase : int = prediction.squeeze().cpu().numpy() _lowercase : Union[str, Any] = (output * 2_55 / np.max(lowerCamelCase)).astype('uint8') _lowercase : List[str] = Image.fromarray(lowerCamelCase) _lowercase : str = {} _lowercase : Optional[Any] = predicted_depth 
_lowercase : str = depth return output_dict
84
import tempfile

import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device

from ..test_pipelines_common import to_np


class IFPipelineTesterMixin:
    """
    Shared helpers for DeepFloyd-IF pipeline tests.

    Provides tiny, seeded dummy components (base and super-resolution variants)
    and save/load round-trip checks reused by the concrete pipeline test
    classes (which define `pipeline_class` and `get_dummy_inputs`).
    """

    def _get_dummy_components(self):
        """Build seeded, tiny components for the base IF pipeline."""
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _get_superresolution_dummy_components(self):
        """Build seeded, tiny components for the IF super-resolution pipeline."""
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        # Super-resolution UNet: takes the low-res image concatenated with the
        # noisy sample (in_channels=6) and is conditioned on the noise level.
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
            class_embed_type="timestep",
            mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu",
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _test_save_load_optional_components(self):
        """Round-trip the pipeline through save/load with all optional
        components set to None and check the output is unchanged."""
        components = self._get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # img2img / inpainting / superresolution variants carry extra inputs.
        image = inputs["image"] if "image" in inputs else None
        mask_image = inputs["mask_image"] if "mask_image" in inputs else None
        original_image = inputs["original_image"] if "original_image" in inputs else None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)

    def _test_save_load_local(self):
        """Round-trip the pipeline through save/load on disk and check the
        output is unchanged."""
        components = self._get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
84
1
"""Convert an XLNet TensorFlow checkpoint into a PyTorch model."""

import argparse
import os

import torch

from transformers import (
    XLNetConfig,
    XLNetForQuestionAnswering,
    XLNetForSequenceClassification,
    XLNetLMHeadModel,
    load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


# Number of output labels for each supported GLUE fine-tuning task.
GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}

logging.set_verbosity_info()


def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    """Load a TF XLNet checkpoint and save it as PyTorch weights + config.

    Args:
        tf_checkpoint_path: Path to the TensorFlow checkpoint.
        bert_config_file: JSON config describing the XLNet architecture.
        pytorch_dump_folder_path: Folder where weights/config are written.
        finetuning_task: Optional task name; selects the model head
            (sequence classification for GLUE tasks, QA for SQuAD, LM
            head otherwise).
    """
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--xlnet_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained XLNet model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--finetuning_task",
        default=None,
        type=str,
        help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
    )
    args = parser.parse_args()
    print(args)

    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
7
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    """
    Image-to-text pipeline: generates a text caption for the input image(s),
    optionally conditioned on a text prompt (model-type dependent).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            # Refuse ambiguous configuration rather than silently preferring one.
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        """Generate captions for the image(s) passed as input."""
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                # GIT conditions generation on prompt token ids prefixed with CLS.
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess`
        # (when there is no prompt); batching turns it into a list of Nones.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
227
0
"""Ray-distributed retriever for RAG fine-tuning."""

import logging
import random

import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex


logger = logging.getLogger(__name__)


class RayRetriever:
    """Ray-actor-side wrapper hosting a `RagRetriever` inside a worker.

    The retriever is created lazily (`create_rag_retriever`) and its index
    loaded on demand (`init_retrieval`), so actors stay cheap until used.
    """

    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        # Idempotent: only the first call builds the retriever.
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    """`RagRetriever` that delegates retrieval to a pool of Ray actors.

    With a non-empty worker pool, each `retrieve` call is dispatched to a
    randomly chosen actor; with an empty pool it falls back to in-process
    retrieval.
    """

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        """Load the index, either on every Ray worker or in this process."""
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
365
"""Fast (Rust-backed) tokenizer class for ConvBERT."""

import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}


class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    """Fast ConvBERT tokenizer (WordPiece, BERT-style special tokens)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Re-build the backend normalizer if a saved tokenizer_file disagrees
        # with the options requested here.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """0s for the first segment (incl. CLS/SEP), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
243
0
"""simple docstring""" from collections import namedtuple _a = namedtuple('from_to', 'from_ to') _a = { 'cubicmeter': from_to(1, 1), 'litre': from_to(0.001, 1_000), 'kilolitre': from_to(1, 1), 'gallon': from_to(0.0_0454, 264.172), 'cubicyard': from_to(0.7_6455, 1.3_0795), 'cubicfoot': from_to(0.028, 35.3147), 'cup': from_to(0.0_0023_6588, 4226.75), } def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): if from_type not in METRIC_CONVERSION: raise ValueError( f"""Invalid 'from_type' value: {from_type!r} Supported values are:\n""" + ", ".join(__lowerCamelCase ) ) if to_type not in METRIC_CONVERSION: raise ValueError( f"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n""" + ", ".join(__lowerCamelCase ) ) return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to if __name__ == "__main__": import doctest doctest.testmod()
61
"""Example training script that tracks peak GPU memory usage (with optional
DeepSpeed via Accelerate) while fine-tuning a sequence classifier on MRPC."""

import argparse
import gc
import json
import os

import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def b2mb(x):
    """Convert a byte count to whole megabytes."""
    return int(x / 2**20)


class TorchTracemalloc:
    """Context manager measuring CUDA memory allocated inside the block.

    After exit, exposes `begin`/`end`/`peak` (bytes) and `used`/`peaked`
    (MB deltas relative to `begin`).
    """

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")


def get_dataloaders(
    accelerator: Accelerator,
    batch_size: int = 16,
    model_name: str = "bert-base-cased",
    n_train: int = 320,
    n_val: int = 160,
):
    """Build train/eval dataloaders over small GLUE/MRPC slices.

    Args:
        accelerator: Used only to detect TPU, which needs fixed-length padding.
        batch_size: Batch size for both loaders.
        model_name: Tokenizer checkpoint to use.
        n_train / n_val: Number of train / validation examples to keep.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader


def training_function(config, args):
    """Train for `config['num_epochs']` epochs, recording per-epoch peak GPU
    memory, and optionally asserting it stays below `args.peak_memory_upper_bound`."""
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer: a real AdamW unless DeepSpeed supplies its own.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler (dummy when DeepSpeed configures its own).
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + b2mb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)


def main():
    """Parse CLI arguments and launch training."""
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
252
0
"""simple docstring""" import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class __snake_case : def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" torch.manual_seed(0 ) _lowerCamelCase : Dict = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) _lowerCamelCase : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) _lowerCamelCase : Optional[int] = UNetaDConditionModel( sample_size=3_2 , layers_per_block=1 , block_out_channels=[3_2, 6_4] , down_block_types=[ '''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D''', ] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) _lowerCamelCase : List[Any] = DDPMScheduler( num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=__lowerCAmelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , ) torch.manual_seed(0 ) _lowerCamelCase : Optional[int] = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, 
"safety_checker": None, "feature_extractor": None, } def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" torch.manual_seed(0 ) _lowerCamelCase : Union[str, Any] = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) _lowerCamelCase : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) _lowerCamelCase : Tuple = UNetaDConditionModel( sample_size=3_2 , layers_per_block=[1, 2] , block_out_channels=[3_2, 6_4] , down_block_types=[ '''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D''', ] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='''gelu''' , time_embedding_dim=3_2 , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) _lowerCamelCase : int = DDPMScheduler( num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=__lowerCAmelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , ) torch.manual_seed(0 ) _lowerCamelCase : int = DDPMScheduler( num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , ) torch.manual_seed(0 ) _lowerCamelCase : str = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, 
"feature_extractor": None, } def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : List[str] = self.get_dummy_components() _lowerCamelCase : Optional[Any] = self.pipeline_class(**__lowerCAmelCase ) pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _lowerCamelCase : Tuple = self.get_dummy_inputs(__lowerCAmelCase ) _lowerCamelCase : int = inputs['''prompt'''] _lowerCamelCase : Any = inputs['''generator'''] _lowerCamelCase : Optional[int] = inputs['''num_inference_steps'''] _lowerCamelCase : str = inputs['''output_type'''] if "image" in inputs: _lowerCamelCase : Optional[Any] = inputs['''image'''] else: _lowerCamelCase : Tuple = None if "mask_image" in inputs: _lowerCamelCase : Union[str, Any] = inputs['''mask_image'''] else: _lowerCamelCase : Dict = None if "original_image" in inputs: _lowerCamelCase : Optional[Any] = inputs['''original_image'''] else: _lowerCamelCase : int = None _lowerCamelCase , _lowerCamelCase : Any = pipe.encode_prompt(__lowerCAmelCase ) # inputs with prompt converted to embeddings _lowerCamelCase : Dict = { '''prompt_embeds''': prompt_embeds, '''negative_prompt_embeds''': negative_prompt_embeds, '''generator''': generator, '''num_inference_steps''': num_inference_steps, '''output_type''': output_type, } if image is not None: _lowerCamelCase : Any = image if mask_image is not None: _lowerCamelCase : Any = mask_image if original_image is not None: _lowerCamelCase : Optional[Any] = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : Optional[Any] = pipe(**__lowerCAmelCase )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(__lowerCAmelCase ) _lowerCamelCase : int = self.pipeline_class.from_pretrained(__lowerCAmelCase ) pipe_loaded.to(__lowerCAmelCase ) pipe_loaded.set_progress_bar_config(disable=__lowerCAmelCase ) 
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(__lowerCAmelCase , __lowerCAmelCase ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , ) _lowerCamelCase : Any = self.get_dummy_inputs(__lowerCAmelCase ) _lowerCamelCase : int = inputs['''generator'''] _lowerCamelCase : Union[str, Any] = inputs['''num_inference_steps'''] _lowerCamelCase : Union[str, Any] = inputs['''output_type'''] # inputs with prompt converted to embeddings _lowerCamelCase : Optional[int] = { '''prompt_embeds''': prompt_embeds, '''negative_prompt_embeds''': negative_prompt_embeds, '''generator''': generator, '''num_inference_steps''': num_inference_steps, '''output_type''': output_type, } if image is not None: _lowerCamelCase : Union[str, Any] = image if mask_image is not None: _lowerCamelCase : Tuple = mask_image if original_image is not None: _lowerCamelCase : str = original_image _lowerCamelCase : str = pipe_loaded(**__lowerCAmelCase )[0] _lowerCamelCase : Union[str, Any] = np.abs(to_np(__lowerCAmelCase ) - to_np(__lowerCAmelCase ) ).max() self.assertLess(__lowerCAmelCase , 1E-4 ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : Optional[int] = self.get_dummy_components() _lowerCamelCase : List[str] = self.pipeline_class(**__lowerCAmelCase ) pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _lowerCamelCase : Tuple = self.get_dummy_inputs(__lowerCAmelCase ) _lowerCamelCase : str = pipe(**__lowerCAmelCase )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(__lowerCAmelCase ) _lowerCamelCase : Any = self.pipeline_class.from_pretrained(__lowerCAmelCase ) pipe_loaded.to(__lowerCAmelCase ) pipe_loaded.set_progress_bar_config(disable=__lowerCAmelCase ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests _lowerCamelCase : 
List[str] = self.get_dummy_inputs(__lowerCAmelCase ) _lowerCamelCase : List[str] = pipe_loaded(**__lowerCAmelCase )[0] _lowerCamelCase : Dict = np.abs(to_np(__lowerCAmelCase ) - to_np(__lowerCAmelCase ) ).max() self.assertLess(__lowerCAmelCase , 1E-4 )
175
"""simple docstring""" import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __snake_case ( _lowercase , unittest.TestCase): # TODO: is there an appropriate internal test set? snake_case__ : List[str] = "ssube/stable-diffusion-x4-upscaler-onnx" def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : int=0 ): """simple docstring""" _lowerCamelCase : Tuple = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(__lowerCAmelCase ) ) _lowerCamelCase : Union[str, Any] = torch.manual_seed(__lowerCAmelCase ) _lowerCamelCase : Tuple = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = self.get_dummy_inputs() _lowerCamelCase : Any = pipe(**__lowerCAmelCase ).images _lowerCamelCase : Dict = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 5_1_2, 5_1_2, 3) _lowerCamelCase : str = np.array( [0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23] ) assert np.abs(image_slice - expected_slice ).max() < 
1E-1 def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) _lowerCamelCase : List[str] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _lowerCamelCase : int = self.get_dummy_inputs() _lowerCamelCase : Optional[Any] = pipe(**__lowerCAmelCase ).images _lowerCamelCase : str = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _lowerCamelCase : Optional[int] = np.array( [0.6_89_88_92, 0.59_24_05_56, 0.52_49_95_27, 0.58_86_62_15, 0.52_25_82_35, 0.52_57_27_15, 0.62_41_44_73, 0.6_17_43_87, 0.6_21_49_64] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Optional[int] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) _lowerCamelCase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _lowerCamelCase : Tuple = self.get_dummy_inputs() _lowerCamelCase : str = pipe(**__lowerCAmelCase ).images _lowerCamelCase : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _lowerCamelCase : str = np.array( [0.7_65_92_78, 0.76_43_76_64, 0.75_57_91_07, 0.7_69_11_16, 0.77_66_69_86, 0.7_72_76_72, 0.7_75_86_64, 0.7_81_22_26, 0.76_94_25_15] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : Any = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) _lowerCamelCase : Union[str, Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) 
_lowerCamelCase : Optional[Any] = self.get_dummy_inputs() _lowerCamelCase : Tuple = pipe(**__lowerCAmelCase ).images _lowerCamelCase : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _lowerCamelCase : Union[str, Any] = np.array( [0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) _lowerCamelCase : int = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = self.get_dummy_inputs() _lowerCamelCase : List[Any] = pipe(**__lowerCAmelCase ).images _lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _lowerCamelCase : Optional[int] = np.array( [0.77_42_44_96, 0.77_36_01, 0.7_64_52_88, 0.7_76_95_98, 0.7_77_27_39, 0.7_73_86_88, 0.78_18_72_33, 0.77_87_95_84, 0.76_70_43] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class __snake_case ( unittest.TestCase): @property def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : Optional[int] = ort.SessionOptions() _lowerCamelCase : List[str] = False return options def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Optional[int] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) 
_lowerCamelCase : Any = init_image.resize((1_2_8, 1_2_8) ) # using the PNDM scheduler by default _lowerCamelCase : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained( '''ssube/stable-diffusion-x4-upscaler-onnx''' , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _lowerCamelCase : int = '''A fantasy landscape, trending on artstation''' _lowerCamelCase : List[Any] = torch.manual_seed(0 ) _lowerCamelCase : List[str] = pipe( prompt=__lowerCAmelCase , image=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=1_0 , generator=__lowerCAmelCase , output_type='''np''' , ) _lowerCamelCase : List[Any] = output.images _lowerCamelCase : List[Any] = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) _lowerCamelCase : str = np.array([0.48_83, 0.49_47, 0.49_80, 0.49_75, 0.49_82, 0.49_80, 0.50_00, 0.50_06, 0.49_72] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Union[str, Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) _lowerCamelCase : int = init_image.resize((1_2_8, 1_2_8) ) _lowerCamelCase : str = LMSDiscreteScheduler.from_pretrained( '''ssube/stable-diffusion-x4-upscaler-onnx''' , subfolder='''scheduler''' ) _lowerCamelCase : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained( '''ssube/stable-diffusion-x4-upscaler-onnx''' , scheduler=__lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _lowerCamelCase : Optional[int] = '''A fantasy landscape, trending on artstation''' _lowerCamelCase : int = torch.manual_seed(0 ) _lowerCamelCase : List[str] = pipe( prompt=__lowerCAmelCase , 
image=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2_0 , generator=__lowerCAmelCase , output_type='''np''' , ) _lowerCamelCase : Union[str, Any] = output.images _lowerCamelCase : Optional[Any] = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) _lowerCamelCase : str = np.array( [0.50_17_37_53, 0.50_22_33_56, 0.50_20_39, 0.50_23_30_36, 0.5_02_37_25, 0.5_02_26_01, 0.5_01_87_58, 0.50_23_40_85, 0.50_24_15_66] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
175
1
"""simple docstring""" def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> str: return "".join(chr(ord(lowercase_ ) - 32 ) if "a" <= char <= "z" else char for char in word ) if __name__ == "__main__": from doctest import testmod testmod()
247
"""simple docstring""" def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> int: if exponent == 1: return base if exponent % 2 == 0: A__ = _modexpt(lowercase_ , exponent // 2 , lowercase_ ) % modulo_value return (x * x) % modulo_value else: return (base * _modexpt(lowercase_ , exponent - 1 , lowercase_ )) % modulo_value def _SCREAMING_SNAKE_CASE ( lowercase_ = 17_77 , lowercase_ = 18_55 , lowercase_ = 8 ) -> int: A__ = base for _ in range(1 , lowercase_ ): A__ = _modexpt(lowercase_ , lowercase_ , 10**digits ) return result if __name__ == "__main__": print(f'{solution() = }')
247
1
"""simple docstring""" import json import os import re import unittest from transformers import CodeGenTokenizer, CodeGenTokenizerFast from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' A__ = CodeGenTokenizer A__ = CodeGenTokenizerFast A__ = True A__ = {'''add_prefix_space''': True} A__ = False def lowerCamelCase__ (self : int ) -> List[Any]: """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowercase__ = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", """<|endoftext|>""", ] lowercase__ = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) lowercase__ = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] lowercase__ = {"""unk_token""": """<unk>"""} lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(_UpperCAmelCase ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(_UpperCAmelCase ) ) def lowerCamelCase__ (self : int , **_UpperCAmelCase : List[str] ) -> List[str]: """simple docstring""" kwargs.update(self.special_tokens_map ) return CodeGenTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def lowerCamelCase__ (self : Union[str, Any] , **_UpperCAmelCase : List[str] ) -> Dict: """simple docstring""" kwargs.update(self.special_tokens_map ) 
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" lowercase__ = """lower newer""" lowercase__ = """lower newer""" return input_text, output_text def lowerCamelCase__ (self : Optional[int] ) -> List[Any]: """simple docstring""" lowercase__ = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) lowercase__ = """lower newer""" lowercase__ = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] lowercase__ = tokenizer.tokenize(_UpperCAmelCase , add_prefix_space=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__ = tokens + [tokenizer.unk_token] lowercase__ = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase ) def lowerCamelCase__ (self : Optional[int] ) -> Any: """simple docstring""" if not self.test_rust_tokenizer: return lowercase__ = self.get_tokenizer() lowercase__ = self.get_rust_tokenizer(add_prefix_space=_UpperCAmelCase ) lowercase__ = """lower newer""" # Testing tokenization lowercase__ = tokenizer.tokenize(_UpperCAmelCase , add_prefix_space=_UpperCAmelCase ) lowercase__ = rust_tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) # Testing conversion to ids without special tokens lowercase__ = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase ) lowercase__ = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) # Testing conversion to ids with special tokens lowercase__ = self.get_rust_tokenizer(add_prefix_space=_UpperCAmelCase ) lowercase__ = tokenizer.encode(_UpperCAmelCase , add_prefix_space=_UpperCAmelCase ) lowercase__ = rust_tokenizer.encode(_UpperCAmelCase ) 
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) # Testing the unknown token lowercase__ = tokens + [rust_tokenizer.unk_token] lowercase__ = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase ) def lowerCamelCase__ (self : List[Any] , *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : Optional[int] ) -> Any: """simple docstring""" pass def lowerCamelCase__ (self : Dict , _UpperCAmelCase : Optional[int]=15 ) -> List[str]: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowercase__ = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase ) # Simple input lowercase__ = """This is a simple input""" lowercase__ = ["""This is a simple input 1""", """This is a simple input 2"""] lowercase__ = ("""This is a simple input""", """This is a pair""") lowercase__ = [ ("""This is a simple input 1""", """This is a simple input 2"""), ("""This is a simple pair 1""", """This is a simple pair 2"""), ] # Simple input tests self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="""max_length""" ) # Simple input self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="""max_length""" ) # Simple input self.assertRaises( _UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="""max_length""" , ) # Pair input self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="""max_length""" ) # Pair input self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="""max_length""" ) # Pair input self.assertRaises( _UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , 
max_length=_UpperCAmelCase , padding="""max_length""" , ) def lowerCamelCase__ (self : List[Any] ) -> str: """simple docstring""" lowercase__ = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" ) # Simple input lowercase__ = """This is a simple input""" lowercase__ = ["""This is a simple input looooooooong""", """This is a simple input"""] lowercase__ = ("""This is a simple input""", """This is a pair""") lowercase__ = [ ("""This is a simple input loooooong""", """This is a simple input"""), ("""This is a simple pair loooooong""", """This is a simple pair"""), ] lowercase__ = tokenizer.pad_token_id lowercase__ = tokenizer(_UpperCAmelCase , padding="""max_length""" , max_length=30 , return_tensors="""np""" ) lowercase__ = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , truncate=_UpperCAmelCase , return_tensors="""np""" ) lowercase__ = tokenizer(*_UpperCAmelCase , padding="""max_length""" , max_length=60 , return_tensors="""np""" ) lowercase__ = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , truncate=_UpperCAmelCase , return_tensors="""np""" ) # s # test single string max_length padding self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s["""input_ids"""] ) self.assertTrue(0 in out_s["""attention_mask"""] ) # s2 # test automatic padding self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] ) self.assertFalse(0 in out_sa["""attention_mask"""][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] ) self.assertTrue(0 in out_sa["""attention_mask"""][1] ) # p # test single pair max_length padding self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p["""input_ids"""] ) self.assertTrue(0 in out_p["""attention_mask"""] ) # p2 # test automatic padding pair self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 ) # long 
slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] ) self.assertFalse(0 in out_pa["""attention_mask"""][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] ) self.assertTrue(0 in out_pa["""attention_mask"""][1] ) def lowerCamelCase__ (self : Optional[int] ) -> Optional[int]: """simple docstring""" lowercase__ = """$$$""" lowercase__ = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=_UpperCAmelCase , add_bos_token=_UpperCAmelCase ) lowercase__ = """This is a simple input""" lowercase__ = ["""This is a simple input 1""", """This is a simple input 2"""] lowercase__ = tokenizer.bos_token_id lowercase__ = tokenizer(_UpperCAmelCase ) lowercase__ = tokenizer(_UpperCAmelCase ) self.assertEqual(out_s.input_ids[0] , _UpperCAmelCase ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) lowercase__ = tokenizer.decode(out_s.input_ids ) lowercase__ = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , _UpperCAmelCase ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) @slow def lowerCamelCase__ (self : List[Any] ) -> Any: """simple docstring""" lowercase__ = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" ) lowercase__ = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#""" lowercase__ = """\nif len_a > len_b: result = a\nelse: result = b""" lowercase__ = tokenizer.encode(_UpperCAmelCase ) lowercase__ = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""] lowercase__ = tokenizer.decode(_UpperCAmelCase , truncate_before_pattern=_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) def lowerCamelCase__ (self : Dict ) -> Optional[Any]: """simple docstring""" pass
352
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available A : Dict = {'configuration_glpn': ['GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GLPNConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Tuple = ['GLPNFeatureExtractor'] A : List[str] = ['GLPNImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Optional[int] = [ 'GLPN_PRETRAINED_MODEL_ARCHIVE_LIST', 'GLPNForDepthEstimation', 'GLPNLayer', 'GLPNModel', 'GLPNPreTrainedModel', ] if TYPE_CHECKING: from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_glpn import GLPNFeatureExtractor from .image_processing_glpn import GLPNImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_glpn import ( GLPN_PRETRAINED_MODEL_ARCHIVE_LIST, GLPNForDepthEstimation, GLPNLayer, GLPNModel, GLPNPreTrainedModel, ) else: import sys A : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
146
0
import argparse import torch from transformers import YosoConfig, YosoForMaskedLM def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ): if "model" in orig_key: __UpperCamelCase =orig_key.replace('model.' , '' ) if "norm1" in orig_key: __UpperCamelCase =orig_key.replace('norm1' , 'attention.output.LayerNorm' ) if "norm2" in orig_key: __UpperCamelCase =orig_key.replace('norm2' , 'output.LayerNorm' ) if "norm" in orig_key: __UpperCamelCase =orig_key.replace('norm' , 'LayerNorm' ) if "transformer" in orig_key: __UpperCamelCase =orig_key.split('.' )[0].split('_' )[-1] __UpperCamelCase =orig_key.replace(F'transformer_{layer_num}' , F'encoder.layer.{layer_num}' ) if "mha.attn" in orig_key: __UpperCamelCase =orig_key.replace('mha.attn' , 'attention.self' ) if "mha" in orig_key: __UpperCamelCase =orig_key.replace('mha' , 'attention' ) if "W_q" in orig_key: __UpperCamelCase =orig_key.replace('W_q' , 'self.query' ) if "W_k" in orig_key: __UpperCamelCase =orig_key.replace('W_k' , 'self.key' ) if "W_v" in orig_key: __UpperCamelCase =orig_key.replace('W_v' , 'self.value' ) if "ff1" in orig_key: __UpperCamelCase =orig_key.replace('ff1' , 'intermediate.dense' ) if "ff2" in orig_key: __UpperCamelCase =orig_key.replace('ff2' , 'output.dense' ) if "ff" in orig_key: __UpperCamelCase =orig_key.replace('ff' , 'output.dense' ) if "mlm_class" in orig_key: __UpperCamelCase =orig_key.replace('mlm.mlm_class' , 'cls.predictions.decoder' ) if "mlm" in orig_key: __UpperCamelCase =orig_key.replace('mlm' , 'cls.predictions.transform' ) if "cls" not in orig_key: __UpperCamelCase ='yoso.' 
+ orig_key return orig_key def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] ): for key in orig_state_dict.copy().keys(): __UpperCamelCase =orig_state_dict.pop(SCREAMING_SNAKE_CASE__ ) if ("pooler" in key) or ("sen_class" in key): continue else: __UpperCamelCase =val __UpperCamelCase =orig_state_dict['cls.predictions.decoder.bias'] __UpperCamelCase =torch.arange(SCREAMING_SNAKE_CASE__ ).expand((1, -1) ) + 2 return orig_state_dict def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] ): __UpperCamelCase =torch.load(SCREAMING_SNAKE_CASE__ , map_location='cpu' )['model_state_dict'] __UpperCamelCase =YosoConfig.from_json_file(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =YosoForMaskedLM(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =convert_checkpoint_helper(config.max_position_embeddings , SCREAMING_SNAKE_CASE__ ) print(model.load_state_dict(SCREAMING_SNAKE_CASE__ ) ) model.eval() model.save_pretrained(SCREAMING_SNAKE_CASE__ ) print(F'Checkpoint successfuly converted. Model saved at {pytorch_dump_path}' ) if __name__ == "__main__": _A = argparse.ArgumentParser() # Required parameters parser.add_argument( '--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The json file for YOSO model config.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) _A = parser.parse_args() convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
62
"""simple docstring""" import torch from torch import nn from transformers import CLIPPreTrainedModel, CLIPVisionModel from ...models.attention import BasicTransformerBlock from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name class UpperCamelCase_ (__A ): def __init__( self : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict=768 ) -> List[Any]: super().__init__(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = proj_size UpperCAmelCase_ : Optional[Any] = CLIPVisionModel(lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = PaintByExampleMapper(lowerCAmelCase_ ) UpperCAmelCase_ : str = nn.LayerNorm(config.hidden_size ) UpperCAmelCase_ : List[Any] = nn.Linear(config.hidden_size , self.proj_size ) # uncondition for scaling UpperCAmelCase_ : Optional[int] = nn.Parameter(torch.randn((1, 1, self.proj_size) ) ) def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict=False ) -> Union[str, Any]: UpperCAmelCase_ : Optional[int] = self.model(pixel_values=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = clip_output.pooler_output UpperCAmelCase_ : List[Any] = self.mapper(latent_states[:, None] ) UpperCAmelCase_ : List[str] = self.final_layer_norm(lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = self.proj_out(lowerCAmelCase_ ) if return_uncond_vector: return latent_states, self.uncond_vector return latent_states class UpperCamelCase_ (nn.Module ): def __init__( self : Dict , lowerCAmelCase_ : Union[str, Any] ) -> Tuple: super().__init__() UpperCAmelCase_ : List[Any] = (config.num_hidden_layers + 1) // 5 UpperCAmelCase_ : Optional[Any] = config.hidden_size UpperCAmelCase_ : List[str] = 1 UpperCAmelCase_ : Union[str, Any] = nn.ModuleList( [ BasicTransformerBlock(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , activation_fn="gelu" , attention_bias=lowerCAmelCase_ ) for _ in range(lowerCAmelCase_ ) ] ) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : List[str] ) -> 
str: for block in self.blocks: UpperCAmelCase_ : int = block(lowerCAmelCase_ ) return hidden_states
268
0
"""Tests for the zero-shot-classification pipeline (PT and TF backends)."""
import unittest

from transformers import (
    MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    Pipeline,
    ZeroShotClassificationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow

from .test_pipelines_common import ANY


# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}


@is_pipeline_test
class _A(unittest.TestCase):
    """Exercises ZeroShotClassificationPipeline: input handling, entailment-id
    resolution, truncation of over-long inputs, and small/large model outputs."""

    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    # Drop layout models: they need image inputs, not plain text.
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build the pipeline plus example inputs for the common pipeline test-suite."""
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["polics", "health"]
        )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]

    def run_pipeline_test(self, classifier, _):
        """Check the accepted input shapes, output schema and error cases."""
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # Comma-separated string of labels is split into two candidates.
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        # Invalid inputs must raise, not silently succeed.
        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")

        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")

        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")

        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)

        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )

        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)

    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        """Check entailment-id resolution from the model's label2id mapping."""
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        # Generic labels: no entailment label can be found.
        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        # Matching is case-insensitive and prefix-based ("entail...").
        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        # Restore the original config so later tests see an unmodified model.
        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)

    @require_torch
    def test_truncation(self):
        """Very long inputs must be truncated instead of crashing the pipeline."""
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"]
        )

    @require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        # Tiny random model: scores are essentially uniform.
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )

    @require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="tf",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data.",
            candidate_labels=["machine learning", "statistics", "translation", "vision"],
            multi_label=True,
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.817, 0.713, 0.018, 0.018],
            },
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data.",
            candidate_labels=["machine learning", "statistics", "translation", "vision"],
            multi_label=True,
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.817, 0.713, 0.018, 0.018],
            },
        )
131
def __snake_case(number):
    """Return the position of the most significant set bit of ``number``.

    Equivalent to ``int.bit_length()`` for non-negative integers: ``32`` (0b100000)
    gives ``6`` and ``0`` gives ``0``.

    :param number: non-negative ``int`` to inspect.
    :return: 1-based position of the highest set bit, or 0 for input 0.
    :raises TypeError: if ``number`` is not an ``int``.

    NOTE(review): a negative input never reaches 0 under ``>>= 1`` and would loop
    forever; callers are expected to pass non-negative values — TODO confirm.
    """
    # Was `isinstance(number, number)` (compares the value against itself) and
    # the counter was bound to the wrong name; both fixed below.
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1  # drop the lowest bit each round; loop ends at 0
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
131
1
"""Fast (Rust-backed) tokenizer for ELECTRA, a thin wrapper over the *tokenizers*
library mirroring BertTokenizerFast."""
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer


VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
        ),
        "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-generator": (
            "https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/electra-small-generator": 512,
    "google/electra-base-generator": 512,
    "google/electra-large-generator": 512,
    "google/electra-small-discriminator": 512,
    "google/electra-base-discriminator": 512,
    "google/electra-large-discriminator": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "google/electra-small-generator": {"do_lower_case": True},
    "google/electra-base-generator": {"do_lower_case": True},
    "google/electra-large-generator": {"do_lower_case": True},
    "google/electra-small-discriminator": {"do_lower_case": True},
    "google/electra-base-discriminator": {"do_lower_case": True},
    "google/electra-large-discriminator": {"do_lower_case": True},
}


class UpperCAmelCase(PreTrainedTokenizerFast):
    """Construct a "fast" ELECTRA tokenizer backed by the *tokenizers* library.

    WordPiece-based; identical contract to BertTokenizerFast.
    """

    # Contract attribute names read by PreTrainedTokenizerFast machinery.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer only if its serialized state disagrees
        # with the options requested here.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Return ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]`` token ids."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return token-type ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the WordPiece vocabulary to ``save_directory``; return the written paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
177
"""Tests for the YOLOS image processor: resizing, rescaling, padding, and
COCO detection/panoptic annotation encoding."""
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import YolosImageProcessor


class YolosImageProcessingTester(unittest.TestCase):
    """Holds the hyper-parameters used to build a YolosImageProcessor and helpers
    to compute the expected post-resize output dimensions."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Kwargs dict used to instantiate the processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Expected (height, width) after shortest-edge resizing.

        For a batch, the expectation is the per-image maximum in each dimension
        (images are padded up to the largest one).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            # Scale so the shorter side hits size["shortest_edge"], keeping aspect ratio.
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class UpperCAmelCase(ImageProcessingSavingTestMixin, unittest.TestCase):
    """End-to-end tests of YolosImageProcessor against PIL, numpy and torch inputs."""

    # Attribute name required by ImageProcessingSavingTestMixin.
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        # Legacy size/max_size kwargs must be converted to the new size dict.
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
177
1
"""simple docstring""" import copy import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { """google/owlvit-base-patch32""": """https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json""", """google/owlvit-base-patch16""": """https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json""", """google/owlvit-large-patch14""": """https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json""", } class __snake_case ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = 'owlvit_text_model' def __init__( self , __lowerCamelCase=4_9408 , __lowerCamelCase=512 , __lowerCamelCase=2048 , __lowerCamelCase=12 , __lowerCamelCase=8 , __lowerCamelCase=16 , __lowerCamelCase="quick_gelu" , __lowerCamelCase=1e-5 , __lowerCamelCase=0.0 , __lowerCamelCase=0.0_2 , __lowerCamelCase=1.0 , __lowerCamelCase=0 , __lowerCamelCase=4_9406 , __lowerCamelCase=4_9407 , **__lowerCamelCase , ): '''simple docstring''' super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase ) __A : Dict = vocab_size __A : Any = hidden_size __A : Union[str, Any] = intermediate_size __A : Any = num_hidden_layers __A : List[Any] = num_attention_heads __A : Dict = max_position_embeddings __A : Dict = hidden_act __A : Dict = layer_norm_eps __A : Optional[int] = attention_dropout __A : Optional[Any] = initializer_range __A : List[Any] = initializer_factor @classmethod def UpperCamelCase__( cls , __lowerCamelCase , **__lowerCamelCase ): '''simple docstring''' cls._set_token_in_kwargs(__lowerCAmelCase ) __A , __A : str = cls.get_config_dict(__lowerCAmelCase , **__lowerCAmelCase ) # get 
the text config dict if we are loading from OwlViTConfig if config_dict.get('''model_type''' ) == "owlvit": __A : int = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__lowerCAmelCase , **__lowerCAmelCase ) class __snake_case ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = 'owlvit_vision_model' def __init__( self , __lowerCamelCase=768 , __lowerCamelCase=3072 , __lowerCamelCase=12 , __lowerCamelCase=12 , __lowerCamelCase=3 , __lowerCamelCase=768 , __lowerCamelCase=32 , __lowerCamelCase="quick_gelu" , __lowerCamelCase=1e-5 , __lowerCamelCase=0.0 , __lowerCamelCase=0.0_2 , __lowerCamelCase=1.0 , **__lowerCamelCase , ): '''simple docstring''' super().__init__(**__lowerCAmelCase ) __A : Dict = hidden_size __A : Dict = intermediate_size __A : Any = num_hidden_layers __A : List[Any] = num_attention_heads __A : int = num_channels __A : Any = image_size __A : Union[str, Any] = patch_size __A : Tuple = hidden_act __A : Optional[int] = layer_norm_eps __A : Dict = attention_dropout __A : Union[str, Any] = initializer_range __A : Optional[Any] = initializer_factor @classmethod def UpperCamelCase__( cls , __lowerCamelCase , **__lowerCamelCase ): '''simple docstring''' cls._set_token_in_kwargs(__lowerCAmelCase ) __A , __A : Union[str, Any] = cls.get_config_dict(__lowerCAmelCase , **__lowerCAmelCase ) # get the vision config dict if we are loading from OwlViTConfig if config_dict.get('''model_type''' ) == "owlvit": __A : str = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type 
{config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__lowerCAmelCase , **__lowerCAmelCase ) class __snake_case ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = 'owlvit' _lowerCamelCase = True def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=512 , __lowerCamelCase=2.6_5_9_2 , __lowerCamelCase=True , **__lowerCamelCase , ): '''simple docstring''' super().__init__(**__lowerCAmelCase ) if text_config is None: __A : Dict = {} logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' ) if vision_config is None: __A : int = {} logger.info('''vision_config is None. initializing the OwlViTVisionConfig with default values.''' ) __A : Optional[Any] = OwlViTTextConfig(**__lowerCAmelCase ) __A : List[Any] = OwlViTVisionConfig(**__lowerCAmelCase ) __A : Dict = projection_dim __A : Tuple = logit_scale_init_value __A : Optional[int] = return_dict __A : int = 1.0 @classmethod def UpperCamelCase__( cls , __lowerCamelCase , **__lowerCamelCase ): '''simple docstring''' cls._set_token_in_kwargs(__lowerCAmelCase ) __A , __A : Tuple = cls.get_config_dict(__lowerCAmelCase , **__lowerCAmelCase ) if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__lowerCAmelCase , **__lowerCAmelCase ) @classmethod def UpperCamelCase__( cls , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ): '''simple docstring''' __A : int = {} __A : Optional[int] = text_config __A : List[str] = vision_config return cls.from_dict(__lowerCAmelCase , **__lowerCAmelCase ) def UpperCamelCase__( self ): '''simple docstring''' __A : List[Any] = copy.deepcopy(self.__dict__ ) __A : List[str] = self.text_config.to_dict() __A : int = self.vision_config.to_dict() __A : List[Any] = self.__class__.model_type return output class __snake_case ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" @property def UpperCamelCase__( self ): '''simple docstring''' return OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''sequence'''}), ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''attention_mask''', {0: '''batch''', 1: '''sequence'''}), ] ) @property def UpperCamelCase__( self ): '''simple docstring''' return OrderedDict( [ ('''logits_per_image''', {0: '''batch'''}), ('''logits_per_text''', {0: '''batch'''}), ('''text_embeds''', {0: '''batch'''}), ('''image_embeds''', {0: '''batch'''}), ] ) @property def UpperCamelCase__( self ): '''simple docstring''' return 1e-4 def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase = -1 , __lowerCamelCase = -1 , __lowerCamelCase = None , ): '''simple docstring''' __A : Optional[Any] = super().generate_dummy_inputs( processor.tokenizer , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , framework=__lowerCAmelCase ) __A : Union[str, Any] = super().generate_dummy_inputs( processor.image_processor , batch_size=__lowerCAmelCase , framework=__lowerCAmelCase ) return {**text_input_dict, **image_input_dict} @property def UpperCamelCase__( self ): '''simple docstring''' return 14
360
"""simple docstring""" import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __snake_case ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ): """simple docstring""" _lowerCamelCase = MgpstrTokenizer _lowerCamelCase = False _lowerCamelCase = {} _lowerCamelCase = False def UpperCamelCase__( self ): '''simple docstring''' super().setUp() # fmt: off __A : int = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z'''] # fmt: on __A : Dict = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) __A : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__lowerCamelCase ) + '''\n''' ) def UpperCamelCase__( self , **__lowerCamelCase ): '''simple docstring''' return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def UpperCamelCase__( self , __lowerCamelCase ): '''simple docstring''' __A : List[str] = '''tester''' __A : Dict = '''tester''' return input_text, output_text @unittest.skip('''MGP-STR always lower cases letters.''' ) def UpperCamelCase__( self ): '''simple docstring''' pass def UpperCamelCase__( self ): '''simple docstring''' __A : List[Any] = self.get_tokenizers(do_lower_case=__lowerCamelCase ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): __A : Union[str, Any] = '''[SPECIAL_TOKEN]''' tokenizer.add_special_tokens({'''cls_token''': special_token} 
) __A : Optional[Any] = tokenizer.encode([special_token] , add_special_tokens=__lowerCamelCase ) self.assertEqual(len(__lowerCamelCase ) , 1 ) __A : List[Any] = tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) self.assertTrue(special_token not in decoded ) def UpperCamelCase__( self ): '''simple docstring''' __A : Tuple = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): __A , __A : str = self.get_input_output_texts(__lowerCamelCase ) __A : Union[str, Any] = tokenizer.tokenize(__lowerCamelCase ) __A : Union[str, Any] = tokenizer.convert_tokens_to_ids(__lowerCamelCase ) __A : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) __A : Optional[Any] = tokenizer.convert_ids_to_tokens(__lowerCamelCase ) self.assertNotEqual(len(__lowerCamelCase ) , 0 ) __A : Union[str, Any] = tokenizer.decode(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) self.assertEqual(text_a.replace(''' ''' , '''''' ) , __lowerCamelCase ) @unittest.skip('''MGP-STR tokenizer only handles one sequence.''' ) def UpperCamelCase__( self ): '''simple docstring''' pass @unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' ) def UpperCamelCase__( self ): '''simple docstring''' pass
291
0
'''simple docstring''' import string def _SCREAMING_SNAKE_CASE (A ) -> str: """simple docstring""" lowercase__ = '''''' for i in sequence: lowercase__ = ord(A ) if 65 <= extract <= 90: output += chr(155 - extract ) elif 97 <= extract <= 122: output += chr(219 - extract ) else: output += i return output def _SCREAMING_SNAKE_CASE (A ) -> str: """simple docstring""" lowercase__ = string.ascii_letters lowercase__ = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1] return "".join( letters_reversed[letters.index(A )] if c in letters else c for c in sequence ) def _SCREAMING_SNAKE_CASE () -> None: """simple docstring""" from timeit import timeit print('''Running performance benchmarks...''' ) lowercase__ = '''from string import printable ; from __main__ import atbash, atbash_slow''' print(f"> atbash_slow(): {timeit('atbash_slow(printable)' , setup=A )} seconds" ) print(f"> atbash(): {timeit('atbash(printable)' , setup=A )} seconds" ) if __name__ == "__main__": for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"): print(f"""{example} encrypted in atbash: {atbash(example)}""") benchmark()
2
'''simple docstring''' import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand lowerCamelCase : Optional[Any] = ( '4S 3H 2C 7S 5H', '9D 8H 2C 6S 7H', '2D 6D 9D TH 7D', 'TC 8C 2S JH 6C', 'JH 8S TH AH QH', 'TS KS 5S 9S AC', 'KD 6S 9D TH AD', 'KS 8D 4D 9S 4S', # pair '8C 4S KH JS 4D', # pair 'QH 8H KD JH 8S', # pair 'KC 4H KS 2H 8D', # pair 'KD 4S KC 3H 8S', # pair 'AH 8S AS KC JH', # pair '3H 4C 4H 3S 2H', # 2 pairs '5S 5D 2C KH KH', # 2 pairs '3C KH 5D 5S KH', # 2 pairs 'AS 3C KH AD KH', # 2 pairs '7C 7S 3S 7H 5S', # 3 of a kind '7C 7S KH 2H 7H', # 3 of a kind 'AC KH QH AH AS', # 3 of a kind '2H 4D 3C AS 5S', # straight (low ace) '3C 5C 4C 2C 6H', # straight '6S 8S 7S 5H 9H', # straight 'JS QS 9H TS KH', # straight 'QC KH TS JS AH', # straight (high ace) '8C 9C 5C 3C TC', # flush '3S 8S 9S 5S KS', # flush '4C 5C 9C 8C KC', # flush 'JH 8H AH KH QH', # flush '3D 2H 3H 2C 2D', # full house '2H 2C 3S 3H 3D', # full house 'KH KC 3S 3H 3D', # full house 'JC 6H JS JD JH', # 4 of a kind 'JC 7H JS JD JH', # 4 of a kind 'JC KH JS JD JH', # 4 of a kind '2S AS 4S 5S 3S', # straight flush (low ace) '2D 6D 3D 4D 5D', # straight flush '5C 6C 3C 7C 4C', # straight flush 'JH 9H TH KH QH', # straight flush 'JH AH TH KH QH', # royal flush (high ace straight flush) ) lowerCamelCase : Tuple = ( ('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'), ('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'), ('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'), ('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'), ('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'), ('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'), ('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'), ('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'), ('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'), ('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'), ('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'), ('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'), ('4S 5H 
6H TS AC', '3S 5H 6H TS AC', 'Win'), ('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'), ('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'), ('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'), ('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'), ('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'), ('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'), ('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'), ('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'), ('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'), ('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'), ('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'), ('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'), ('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'), ('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'), ('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'), ('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'), ('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'), ('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'), ) lowerCamelCase : Dict = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', True), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', False), ('AS 3S 4S 8S 2S', True), ) lowerCamelCase : Any = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', False), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', True), ) lowerCamelCase : Tuple = ( ('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]), ('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]), ('JH QD KC AS TS', False, [14, 13, 12, 11, 10]), ('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]), ) lowerCamelCase : Optional[int] = ( ('JH AH TH KH QH', 0), ('JH 9H TH KH QH', 0), ('JC KH JS JD JH', 7), ('KH KC 3S 3H 3D', 6), ('8C 9C 5C 3C TC', 0), ('JS QS 9H TS KH', 0), ('7C 7S KH 2H 7H', 3), ('3C KH 5D 5S KH', 2), ('QH 8H KD JH 8S', 1), ('2D 6D 9D TH 7D', 0), ) lowerCamelCase : Dict = ( ('JH AH TH KH QH', 23), ('JH 9H TH KH QH', 22), ('JC KH JS JD JH', 21), ('KH KC 3S 3H 3D', 20), ('8C 9C 5C 3C TC', 19), ('JS QS 9H TS KH', 18), ('7C 7S KH 2H 7H', 17), ('3C KH 5D 5S KH', 16), ('QH 8H KD JH 8S', 15), ('2D 6D 9D TH 7D', 14), ) def _SCREAMING_SNAKE_CASE () -> Union[str, Any]: 
"""simple docstring""" lowercase__ ,lowercase__ = randrange(len(A ) ), randrange(len(A ) ) lowercase__ = ['''Loss''', '''Tie''', '''Win'''][(play >= oppo) + (play > oppo)] lowercase__ ,lowercase__ = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def _SCREAMING_SNAKE_CASE (A = 100 ) -> str: """simple docstring""" return (generate_random_hand() for _ in range(A )) @pytest.mark.parametrize('''hand, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A ) -> List[str]: """simple docstring""" assert PokerHand(A )._is_flush() == expected @pytest.mark.parametrize('''hand, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A ) -> Union[str, Any]: """simple docstring""" assert PokerHand(A )._is_straight() == expected @pytest.mark.parametrize('''hand, expected, card_values''' , A ) def _SCREAMING_SNAKE_CASE (A , A , A ) -> Any: """simple docstring""" lowercase__ = PokerHand(A ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize('''hand, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A ) -> Tuple: """simple docstring""" assert PokerHand(A )._is_same_kind() == expected @pytest.mark.parametrize('''hand, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A ) -> Optional[Any]: """simple docstring""" assert PokerHand(A )._hand_type == expected @pytest.mark.parametrize('''hand, other, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A , A ) -> Union[str, Any]: """simple docstring""" assert PokerHand(A ).compare_with(PokerHand(A ) ) == expected @pytest.mark.parametrize('''hand, other, expected''' , generate_random_hands() ) def _SCREAMING_SNAKE_CASE (A , A , A ) -> Optional[Any]: """simple docstring""" assert PokerHand(A ).compare_with(PokerHand(A ) ) == expected def _SCREAMING_SNAKE_CASE () -> Tuple: """simple docstring""" lowercase__ = [PokerHand(A ) for hand in SORTED_HANDS] lowercase__ = poker_hands.copy() shuffle(A ) lowercase__ = chain(sorted(A ) ) for index, hand in enumerate(A ): assert hand == 
poker_hands[index] def _SCREAMING_SNAKE_CASE () -> List[Any]: """simple docstring""" lowercase__ = [PokerHand('''2D AC 3H 4H 5S''' ), PokerHand('''2S 3H 4H 5S 6C''' )] pokerhands.sort(reverse=A ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def _SCREAMING_SNAKE_CASE () -> int: """simple docstring""" lowercase__ = PokerHand('''2C 4S AS 3D 5C''' ) lowercase__ = True lowercase__ = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def _SCREAMING_SNAKE_CASE () -> Union[str, Any]: """simple docstring""" lowercase__ = 0 lowercase__ = os.path.abspath(os.path.dirname(A ) ) lowercase__ = os.path.join(A , '''poker_hands.txt''' ) with open(A ) as file_hand: for line in file_hand: lowercase__ = line[:14].strip() lowercase__ = line[15:].strip() lowercase__ ,lowercase__ = PokerHand(A ), PokerHand(A ) lowercase__ = player.compare_with(A ) if output == "Win": answer += 1 assert answer == 376
2
1
UpperCamelCase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' def _A ( ): """simple docstring""" lowerCAmelCase__ = input("Enter message: " ) lowerCAmelCase__ = input("Enter key [alphanumeric]: " ) lowerCAmelCase__ = input("Encrypt/Decrypt [e/d]: " ) if mode.lower().startswith("e" ): lowerCAmelCase__ = "encrypt" lowerCAmelCase__ = encrypt_message(lowerCAmelCase_ , lowerCAmelCase_ ) elif mode.lower().startswith("d" ): lowerCAmelCase__ = "decrypt" lowerCAmelCase__ = decrypt_message(lowerCAmelCase_ , lowerCAmelCase_ ) print(F'\n{mode.title()}ed message:' ) print(lowerCAmelCase_ ) def _A ( lowerCAmelCase_ : str , lowerCAmelCase_ : str ): """simple docstring""" return translate_message(lowerCAmelCase_ , lowerCAmelCase_ , "encrypt" ) def _A ( lowerCAmelCase_ : str , lowerCAmelCase_ : str ): """simple docstring""" return translate_message(lowerCAmelCase_ , lowerCAmelCase_ , "decrypt" ) def _A ( lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : str ): """simple docstring""" lowerCAmelCase__ = [] lowerCAmelCase__ = 0 lowerCAmelCase__ = key.upper() for symbol in message: lowerCAmelCase__ = LETTERS.find(symbol.upper() ) if num != -1: if mode == "encrypt": num += LETTERS.find(key[key_index] ) elif mode == "decrypt": num -= LETTERS.find(key[key_index] ) num %= len(lowerCAmelCase_ ) if symbol.isupper(): translated.append(LETTERS[num] ) elif symbol.islower(): translated.append(LETTERS[num].lower() ) key_index += 1 if key_index == len(lowerCAmelCase_ ): lowerCAmelCase__ = 0 else: translated.append(lowerCAmelCase_ ) return "".join(lowerCAmelCase_ ) if __name__ == "__main__": main()
221
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast @require_vision class __lowerCamelCase ( unittest.TestCase ): """simple docstring""" def a ( self : Dict ) -> Optional[int]: lowerCAmelCase__ = tempfile.mkdtemp() lowerCAmelCase__ = BlipImageProcessor() lowerCAmelCase__ = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" ) lowerCAmelCase__ = BlipaProcessor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) processor.save_pretrained(self.tmpdirname ) def a ( self : int , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[Any]: return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).tokenizer def a ( self : Tuple , **SCREAMING_SNAKE_CASE__ : Tuple ) -> Optional[Any]: return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).image_processor def a ( self : str ) -> int: shutil.rmtree(self.tmpdirname ) def a ( self : List[Any] ) -> Any: lowerCAmelCase__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] lowerCAmelCase__ = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def a ( self : str ) -> Dict: lowerCAmelCase__ = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCAmelCase__ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) lowerCAmelCase__ = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 ) lowerCAmelCase__ = BlipaProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 ) 
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ ) def a ( self : int ) -> str: lowerCAmelCase__ = self.get_image_processor() lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = BlipaProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = self.prepare_image_inputs() lowerCAmelCase__ = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors="np" ) lowerCAmelCase__ = processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def a ( self : Tuple ) -> int: lowerCAmelCase__ = self.get_image_processor() lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = BlipaProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = "lower newer" lowerCAmelCase__ = processor(text=SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = tokenizer(SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def a ( self : Dict ) -> str: lowerCAmelCase__ = self.get_image_processor() lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = BlipaProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = "lower newer" lowerCAmelCase__ = self.prepare_image_inputs() lowerCAmelCase__ = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] ) # test if it raises when no input is passed with 
pytest.raises(SCREAMING_SNAKE_CASE__ ): processor() def a ( self : str ) -> List[str]: lowerCAmelCase__ = self.get_image_processor() lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = BlipaProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCAmelCase__ = processor.batch_decode(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def a ( self : List[str] ) -> Any: lowerCAmelCase__ = self.get_image_processor() lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = BlipaProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = "lower newer" lowerCAmelCase__ = self.prepare_image_inputs() lowerCAmelCase__ = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
221
1
'''simple docstring''' import unittest import numpy as np import torch from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class SCREAMING_SNAKE_CASE( unittest.TestCase ): """simple docstring""" @property def A ( self : List[Any] ) -> List[str]: torch.manual_seed(0 ) UpperCAmelCase : Optional[Any] = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def A ( self : Union[str, Any] ) -> List[str]: UpperCAmelCase : List[Any] = self.dummy_uncond_unet UpperCAmelCase : Optional[int] = PNDMScheduler() UpperCAmelCase : Any = PNDMPipeline(unet=__snake_case , scheduler=__snake_case ) pndm.to(__snake_case ) pndm.set_progress_bar_config(disable=__snake_case ) UpperCAmelCase : Union[str, Any] = torch.manual_seed(0 ) UpperCAmelCase : Any = pndm(generator=__snake_case , num_inference_steps=20 , output_type='''numpy''' ).images UpperCAmelCase : Dict = torch.manual_seed(0 ) UpperCAmelCase : List[Any] = pndm(generator=__snake_case , num_inference_steps=20 , output_type='''numpy''' , return_dict=__snake_case )[0] UpperCAmelCase : Dict = image[0, -3:, -3:, -1] UpperCAmelCase : List[str] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCAmelCase : Optional[Any] = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class SCREAMING_SNAKE_CASE( unittest.TestCase ): """simple docstring""" def A ( self : Optional[int] ) -> str: UpperCAmelCase : str = '''google/ddpm-cifar10-32''' UpperCAmelCase : Optional[Any] = UNetaDModel.from_pretrained(__snake_case ) 
UpperCAmelCase : List[str] = PNDMScheduler() UpperCAmelCase : int = PNDMPipeline(unet=__snake_case , scheduler=__snake_case ) pndm.to(__snake_case ) pndm.set_progress_bar_config(disable=__snake_case ) UpperCAmelCase : Tuple = torch.manual_seed(0 ) UpperCAmelCase : Any = pndm(generator=__snake_case , output_type='''numpy''' ).images UpperCAmelCase : str = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCAmelCase : List[Any] = np.array([0.15_64, 0.1_46_45, 0.14_06, 0.1_47_15, 0.1_24_25, 0.1_40_45, 0.1_31_15, 0.1_21_75, 0.1_25] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
23
'''simple docstring''' from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_regnet import RegNetConfig a__ : Tuple = logging.get_logger(__name__) # General docstring a__ : List[Any] = "RegNetConfig" # Base docstring a__ : Dict = "facebook/regnet-y-040" a__ : Optional[int] = [1, 1_0_8_8, 7, 7] # Image classification docstring a__ : Union[str, Any] = "facebook/regnet-y-040" a__ : Union[str, Any] = "tabby, tabby cat" a__ : int = [ "facebook/regnet-y-040", # See all regnet models at https://huggingface.co/models?filter=regnet ] class UpperCamelCase__ ( nn.Module): def __init__( self :Union[str, Any] , _A :int , _A :int , _A :int = 3 , _A :int = 1 , _A :int = 1 , _A :Optional[str] = "relu" , ) -> int: '''simple docstring''' super().__init__() __A = nn.Convad( _A , _A , kernel_size=_A , stride=_A , padding=kernel_size // 2 , groups=_A , bias=_A , ) __A = nn.BatchNormad(_A ) __A = ACTaFN[activation] if activation is not None else nn.Identity() def lowercase_ ( self :Tuple , _A :Union[str, Any] ) -> int: '''simple docstring''' __A = self.convolution(_A ) __A = self.normalization(_A ) __A = self.activation(_A ) return hidden_state class UpperCamelCase__ ( nn.Module): def __init__( self :Optional[int] , _A :RegNetConfig ) -> List[str]: '''simple docstring''' super().__init__() __A = RegNetConvLayer( config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act ) __A = config.num_channels def lowercase_ ( self :Any , _A :Optional[int] ) -> 
Optional[int]: '''simple docstring''' __A = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' ) __A = self.embedder(_A ) return hidden_state class UpperCamelCase__ ( nn.Module): def __init__( self :Optional[int] , _A :int , _A :int , _A :int = 2 ) -> Any: '''simple docstring''' super().__init__() __A = nn.Convad(_A , _A , kernel_size=1 , stride=_A , bias=_A ) __A = nn.BatchNormad(_A ) def lowercase_ ( self :Optional[int] , _A :Tensor ) -> Tensor: '''simple docstring''' __A = self.convolution(_A ) __A = self.normalization(_A ) return hidden_state class UpperCamelCase__ ( nn.Module): def __init__( self :Optional[Any] , _A :int , _A :int ) -> List[str]: '''simple docstring''' super().__init__() __A = nn.AdaptiveAvgPoolad((1, 1) ) __A = nn.Sequential( nn.Convad(_A , _A , kernel_size=1 ) , nn.ReLU() , nn.Convad(_A , _A , kernel_size=1 ) , nn.Sigmoid() , ) def lowercase_ ( self :Any , _A :str ) -> int: '''simple docstring''' __A = self.pooler(_A ) __A = self.attention(_A ) __A = hidden_state * attention return hidden_state class UpperCamelCase__ ( nn.Module): def __init__( self :int , _A :RegNetConfig , _A :int , _A :int , _A :int = 1 ) -> List[Any]: '''simple docstring''' super().__init__() __A = in_channels != out_channels or stride != 1 __A = max(1 , out_channels // config.groups_width ) __A = ( RegNetShortCut(_A , _A , stride=_A ) if should_apply_shortcut else nn.Identity() ) __A = nn.Sequential( RegNetConvLayer(_A , _A , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_A , _A , stride=_A , groups=_A , activation=config.hidden_act ) , RegNetConvLayer(_A , _A , kernel_size=1 , activation=_A ) , ) __A = ACTaFN[config.hidden_act] def lowercase_ ( self :Optional[Any] , _A :int ) -> int: '''simple docstring''' __A = hidden_state __A = self.layer(_A ) __A = self.shortcut(_A ) hidden_state += residual __A = self.activation(_A ) 
return hidden_state class UpperCamelCase__ ( nn.Module): def __init__( self :Optional[int] , _A :RegNetConfig , _A :int , _A :int , _A :int = 1 ) -> Any: '''simple docstring''' super().__init__() __A = in_channels != out_channels or stride != 1 __A = max(1 , out_channels // config.groups_width ) __A = ( RegNetShortCut(_A , _A , stride=_A ) if should_apply_shortcut else nn.Identity() ) __A = nn.Sequential( RegNetConvLayer(_A , _A , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_A , _A , stride=_A , groups=_A , activation=config.hidden_act ) , RegNetSELayer(_A , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(_A , _A , kernel_size=1 , activation=_A ) , ) __A = ACTaFN[config.hidden_act] def lowercase_ ( self :int , _A :int ) -> int: '''simple docstring''' __A = hidden_state __A = self.layer(_A ) __A = self.shortcut(_A ) hidden_state += residual __A = self.activation(_A ) return hidden_state class UpperCamelCase__ ( nn.Module): def __init__( self :Tuple , _A :RegNetConfig , _A :int , _A :int , _A :int = 2 , _A :int = 2 , ) -> Any: '''simple docstring''' super().__init__() __A = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer __A = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( _A , _A , _A , stride=_A , ) , *[layer(_A , _A , _A ) for _ in range(depth - 1 )] , ) def lowercase_ ( self :List[str] , _A :Optional[int] ) -> Tuple: '''simple docstring''' __A = self.layers(_A ) return hidden_state class UpperCamelCase__ ( nn.Module): def __init__( self :Union[str, Any] , _A :RegNetConfig ) -> List[str]: '''simple docstring''' super().__init__() __A = nn.ModuleList([] ) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( _A , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) __A = zip(config.hidden_sizes , 
config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(_A , config.depths[1:] ): self.stages.append(RegNetStage(_A , _A , _A , depth=_A ) ) def lowercase_ ( self :str , _A :Tensor , _A :bool = False , _A :bool = True ) -> BaseModelOutputWithNoAttention: '''simple docstring''' __A = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: __A = hidden_states + (hidden_state,) __A = stage_module(_A ) if output_hidden_states: __A = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=_A , hidden_states=_A ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE): UpperCAmelCase__ : int = RegNetConfig UpperCAmelCase__ : Dict = 'regnet' UpperCAmelCase__ : int = 'pixel_values' UpperCAmelCase__ : Optional[int] = True def lowercase_ ( self :str , _A :Optional[int] ) -> Tuple: '''simple docstring''' if isinstance(_A , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' ) elif isinstance(_A , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def lowercase_ ( self :int , _A :str , _A :Dict=False ) -> Dict: '''simple docstring''' if isinstance(_A , _A ): __A = value a__ : Optional[int] = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. 
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n" a__ : int = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings( 'The bare RegNet model outputting raw features without any specific head on top.' , SCREAMING_SNAKE_CASE , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet class UpperCamelCase__ ( SCREAMING_SNAKE_CASE): def __init__( self :List[str] , _A :List[Any] ) -> List[str]: '''simple docstring''' super().__init__(_A ) __A = config __A = RegNetEmbeddings(_A ) __A = RegNetEncoder(_A ) __A = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(_A ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=_A , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowercase_ ( self :List[Any] , _A :Tensor , _A :Optional[bool] = None , _A :Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention: '''simple docstring''' __A = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __A = return_dict if return_dict is not None else self.config.use_return_dict __A = self.embedder(_A ) __A = self.encoder( _A , output_hidden_states=_A , return_dict=_A ) __A = encoder_outputs[0] __A = self.pooler(_A ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return 
BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=_A , pooler_output=_A , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( '\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , SCREAMING_SNAKE_CASE , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet class UpperCamelCase__ ( SCREAMING_SNAKE_CASE): def __init__( self :Optional[int] , _A :Optional[Any] ) -> Optional[Any]: '''simple docstring''' super().__init__(_A ) __A = config.num_labels __A = RegNetModel(_A ) # classification head __A = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(_A ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowercase_ ( self :Optional[int] , _A :Optional[torch.FloatTensor] = None , _A :Optional[torch.LongTensor] = None , _A :Optional[bool] = None , _A :Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention: '''simple docstring''' __A = return_dict if return_dict is not None else self.config.use_return_dict __A = self.regnet(_A , output_hidden_states=_A , return_dict=_A ) __A = outputs.pooler_output if return_dict else outputs[1] __A = self.classifier(_A ) __A = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: __A = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): __A = 'single_label_classification' else: __A = 'multi_label_classification' if self.config.problem_type == "regression": __A = MSELoss() if self.num_labels == 1: __A = loss_fct(logits.squeeze() , labels.squeeze() ) else: __A = 
loss_fct(_A , _A ) elif self.config.problem_type == "single_label_classification": __A = CrossEntropyLoss() __A = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": __A = BCEWithLogitsLoss() __A = loss_fct(_A , _A ) if not return_dict: __A = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=_A , logits=_A , hidden_states=outputs.hidden_states )
161
0
import argparse
import os
import re


# Root of the package whose `__init__.py` files are checked / rewritten.
PATH_TO_DIFFUSERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Return the leading whitespace of `line` ("" for blank/unindented lines)."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks of lines at the given indentation level.

    A new block starts on each line whose indentation equals `indent_level`.
    If `start_prompt` is given, everything before the first line starting with
    it becomes block 0; if `end_prompt` is given, everything from the first
    line starting with it becomes the final block.
    """
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the code).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            # A line at `indent_level` closes the previous block...
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                # ...and, if the previous line was more deeply indented, this
                # closing line still belongs to the block being finished.
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wrap a key function so that sorting ignores case and underscores."""

    def _inner(obj):
        return key(obj).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort a list of imported object names in the canonical order.

    Constants (ALL_CAPS) come first, then classes (Capitalized), then
    functions (lowercase), each group sorted case-/underscore-insensitively.
    `key` extracts the name from an element (identity by default).
    """

    # If no key is provided, we use a noop.
    def noop(obj):
        return obj

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    """Return `import_statement` with the object names inside `[...]` sorted.

    Handles three layouts: one name per line, a single bracketed line, and
    everything on one line.
    """

    # This inner function sorts the content of a one-line `[...]`.
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda pair: pair[1])
        sorted_lines = [lines[pair[0] + idx] for pair in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line.
        return _re_bracket_content.sub(_replace, import_statement)


def sort_imports(file, check_only=True):
    """Sort the `_import_structure` of one `__init__.py`.

    Returns True when the file would change and `check_only` is set;
    otherwise rewrites the file in place when needed.
    """
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                reordered_blocks.append(sort_objects_in_import(internal_blocks[sorted_indices[count]]))
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every `__init__.py` under `PATH_TO_DIFFUSERS`.

    Raises ValueError in check mode when any file would be rewritten.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
364
"""simple docstring""" import os import shutil from pathlib import Path from typing import Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging if is_onnx_available(): import onnxruntime as ort _SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE : List[Any] = { """tensor(bool)""": np.bool_, """tensor(int8)""": np.inta, """tensor(uint8)""": np.uinta, """tensor(int16)""": np.intaa, """tensor(uint16)""": np.uintaa, """tensor(int32)""": np.intaa, """tensor(uint32)""": np.uintaa, """tensor(int64)""": np.intaa, """tensor(uint64)""": np.uintaa, """tensor(float16)""": np.floataa, """tensor(float)""": np.floataa, """tensor(double)""": np.floataa, } class __a : """simple docstring""" def __init__( self : Optional[Any] , lowercase_ : Tuple=None , **lowercase_ : int ): logger.info('''`diffusers.OnnxRuntimeModel` is experimental and might change in the future.''' ) UpperCamelCase__ : Optional[Any] =model UpperCamelCase__ : str =kwargs.get('''model_save_dir''' , lowercase_ ) UpperCamelCase__ : int =kwargs.get('''latest_model_name''' , lowercase_ ) def __call__( self : Any , **lowercase_ : Any ): UpperCamelCase__ : str ={k: np.array(lowercase_ ) for k, v in kwargs.items()} return self.model.run(lowercase_ , lowercase_ ) @staticmethod def _lowerCAmelCase ( lowercase_ : Union[str, Path] , lowercase_ : Dict=None , lowercase_ : Optional[Any]=None ): if provider is None: logger.info('''No onnxruntime provider specified, using CPUExecutionProvider''' ) UpperCamelCase__ : List[str] ='''CPUExecutionProvider''' return ort.InferenceSession(lowercase_ , providers=[provider] , sess_options=lowercase_ ) def _lowerCAmelCase ( self : Union[str, Any] , lowercase_ : Union[str, Path] , lowercase_ : Optional[str] = None , **lowercase_ : Union[str, Any] ): UpperCamelCase__ : Union[str, Any] =file_name if file_name is not None else 
ONNX_WEIGHTS_NAME UpperCamelCase__ : Tuple =self.model_save_dir.joinpath(self.latest_model_name ) UpperCamelCase__ : str =Path(lowercase_ ).joinpath(lowercase_ ) try: shutil.copyfile(lowercase_ , lowercase_ ) except shutil.SameFileError: pass # copy external weights (for models >2GB) UpperCamelCase__ : List[str] =self.model_save_dir.joinpath(lowercase_ ) if src_path.exists(): UpperCamelCase__ : List[str] =Path(lowercase_ ).joinpath(lowercase_ ) try: shutil.copyfile(lowercase_ , lowercase_ ) except shutil.SameFileError: pass def _lowerCAmelCase ( self : Tuple , lowercase_ : Union[str, os.PathLike] , **lowercase_ : int , ): if os.path.isfile(lowercase_ ): logger.error(f'''Provided path ({save_directory}) should be a directory, not a file''' ) return os.makedirs(lowercase_ , exist_ok=lowercase_ ) # saving model weights/files self._save_pretrained(lowercase_ , **lowercase_ ) @classmethod def _lowerCAmelCase ( cls : List[str] , lowercase_ : Union[str, Path] , lowercase_ : Optional[Union[bool, str, None]] = None , lowercase_ : Optional[Union[str, None]] = None , lowercase_ : bool = False , lowercase_ : Optional[str] = None , lowercase_ : Optional[str] = None , lowercase_ : Optional[str] = None , lowercase_ : Optional["ort.SessionOptions"] = None , **lowercase_ : List[Any] , ): UpperCamelCase__ : Union[str, Any] =file_name if file_name is not None else ONNX_WEIGHTS_NAME # load model from local directory if os.path.isdir(lowercase_ ): UpperCamelCase__ : Any =OnnxRuntimeModel.load_model( os.path.join(lowercase_ , lowercase_ ) , provider=lowercase_ , sess_options=lowercase_ ) UpperCamelCase__ : List[str] =Path(lowercase_ ) # load model from hub else: # download model UpperCamelCase__ : Tuple =hf_hub_download( repo_id=lowercase_ , filename=lowercase_ , use_auth_token=lowercase_ , revision=lowercase_ , cache_dir=lowercase_ , force_download=lowercase_ , ) UpperCamelCase__ : Any =Path(lowercase_ ).parent UpperCamelCase__ : List[Any] =Path(lowercase_ ).name UpperCamelCase__ : 
Optional[int] =OnnxRuntimeModel.load_model(lowercase_ , provider=lowercase_ , sess_options=lowercase_ ) return cls(model=lowercase_ , **lowercase_ ) @classmethod def _lowerCAmelCase ( cls : Dict , lowercase_ : Union[str, Path] , lowercase_ : bool = True , lowercase_ : Optional[str] = None , lowercase_ : Optional[str] = None , **lowercase_ : List[Any] , ): UpperCamelCase__ : Dict =None if len(str(lowercase_ ).split('''@''' ) ) == 2: UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] =model_id.split('''@''' ) return cls._from_pretrained( model_id=lowercase_ , revision=lowercase_ , cache_dir=lowercase_ , force_download=lowercase_ , use_auth_token=lowercase_ , **lowercase_ , )
157
0
"""simple docstring""" import numpy as np from scipy.spatial.distance import cdist from sklearn.metrics import fa_score import datasets A__ : Tuple = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n' A__ : Tuple = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n' A__ : str = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = 
datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n' def _snake_case ( lowerCamelCase__ : Dict , lowerCamelCase__ : int ) -> Union[str, Any]: return float((preds == labels).mean() ) def _snake_case ( lowerCamelCase__ : Any , lowerCamelCase__ : Union[str, Any] ) -> Dict: lowerCamelCase_ : str =simple_accuracy(lowerCamelCase__ , lowerCamelCase__ ) lowerCamelCase_ : Dict =float(fa_score(y_true=lowerCamelCase__ , y_pred=lowerCamelCase__ ) ) return { "accuracy": acc, "f1": fa, } def _snake_case ( lowerCamelCase__ : Dict , lowerCamelCase__ : int ) -> List[str]: lowerCamelCase_ : Union[str, Any] =np.array(lowerCamelCase__ ) lowerCamelCase_ : Dict =np.array(lowerCamelCase__ ) lowerCamelCase_ : Dict =en_sentvecs.shape[0] # mean centering lowerCamelCase_ : int =en_sentvecs - np.mean(lowerCamelCase__ , axis=0 ) lowerCamelCase_ : Union[str, Any] =in_sentvecs - np.mean(lowerCamelCase__ , axis=0 ) lowerCamelCase_ : Optional[Any] =cdist(lowerCamelCase__ , lowerCamelCase__ , "cosine" ) lowerCamelCase_ : Union[str, Any] =np.array(range(lowerCamelCase__ ) ) lowerCamelCase_ : Any =sim.argsort(axis=1 )[:, :10] lowerCamelCase_ : List[str] =np.any(preds == actual[:, None] , axis=1 ) return float(matches.mean() ) @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class lowercase__ ( datasets.Metric ): def UpperCAmelCase__ ( self : str ): if self.config_name not in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", "wiki-ner", ]: raise KeyError( "You should supply a configuration name selected in " "[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", " "\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", " 
"\"wiki-ner\"]" ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("int64" ) if self.config_name != "cvit-mkb-clsr" else datasets.Sequence(datasets.Value("float32" ) ), "references": datasets.Value("int64" ) if self.config_name != "cvit-mkb-clsr" else datasets.Sequence(datasets.Value("float32" ) ), } ) , codebase_urls=[] , reference_urls=[] , format="numpy" if self.config_name != "cvit-mkb-clsr" else None , ) def UpperCAmelCase__ ( self : List[str] , snake_case__ : Tuple , snake_case__ : Optional[Any] ): if self.config_name == "cvit-mkb-clsr": return {"precision@10": precision_at_aa(snake_case__ , snake_case__ )} elif self.config_name in ["wiki-ner"]: return acc_and_fa(snake_case__ , snake_case__ ) elif self.config_name in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md", ]: return {"accuracy": simple_accuracy(snake_case__ , snake_case__ )} else: raise KeyError( "You should supply a configuration name selected in " "[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", " "\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", " "\"wiki-ner\"]" )
144
"""simple docstring""" import inspect import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import is_torch_available from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin A__ : Union[str, Any] = 1E-4 if is_torch_available(): import torch from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder @require_torch class lowercase__ : def __init__( self : List[Any] , snake_case__ : int , snake_case__ : List[str]=16 , snake_case__ : Tuple=13 , snake_case__ : Dict=7 , snake_case__ : List[Any]=14 , snake_case__ : List[Any]=10 , snake_case__ : Dict=19 , snake_case__ : List[str]=5 , snake_case__ : Union[str, Any]=4 , snake_case__ : str=True , snake_case__ : int=16 , snake_case__ : Union[str, Any]=2 , snake_case__ : Tuple=4 , snake_case__ : Dict=4 , snake_case__ : int="gelu" , snake_case__ : Dict=0.1 , snake_case__ : str=0.1 , snake_case__ : List[str]=[1, 2, 3, 4, 5] , snake_case__ : Optional[int]=25 , snake_case__ : Dict=5 , ): lowerCamelCase_ : Dict =d_model lowerCamelCase_ : int =parent lowerCamelCase_ : Optional[Any] =batch_size lowerCamelCase_ : int =prediction_length lowerCamelCase_ : Optional[int] =context_length lowerCamelCase_ : Any =cardinality lowerCamelCase_ : List[str] =num_time_features lowerCamelCase_ : List[Any] =lags_sequence lowerCamelCase_ : Optional[int] =embedding_dimension lowerCamelCase_ : Union[str, Any] =is_training lowerCamelCase_ : Union[str, Any] =hidden_size lowerCamelCase_ : str =num_hidden_layers lowerCamelCase_ : Any =num_attention_heads lowerCamelCase_ : Any =intermediate_size lowerCamelCase_ : Union[str, Any] =hidden_act lowerCamelCase_ : Optional[int] 
=hidden_dropout_prob lowerCamelCase_ : Optional[int] =attention_probs_dropout_prob lowerCamelCase_ : List[Any] =context_length lowerCamelCase_ : str =prediction_length + label_length lowerCamelCase_ : int =label_length lowerCamelCase_ : Union[str, Any] =moving_average lowerCamelCase_ : str =autocorrelation_factor def UpperCAmelCase__ ( self : Any ): return AutoformerConfig( d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , ) def UpperCAmelCase__ ( self : Optional[Any] , snake_case__ : List[Any] ): lowerCamelCase_ : Optional[Any] =config.context_length + max(config.lags_sequence ) lowerCamelCase_ : Any =ids_tensor([self.batch_size, 1] , config.cardinality[0] ) lowerCamelCase_ : List[Any] =floats_tensor([self.batch_size, _past_length, config.num_time_features] ) lowerCamelCase_ : List[str] =floats_tensor([self.batch_size, _past_length] ) lowerCamelCase_ : Any =floats_tensor([self.batch_size, _past_length] ) > 0.5 # decoder inputs lowerCamelCase_ : Tuple =floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] ) lowerCamelCase_ : Optional[Any] =floats_tensor([self.batch_size, config.prediction_length] ) lowerCamelCase_ : Any ={ "past_values": past_values, "static_categorical_features": static_categorical_features, "past_time_features": past_time_features, "past_observed_mask": 
past_observed_mask, "future_time_features": future_time_features, "future_values": future_values, } return inputs_dict def UpperCAmelCase__ ( self : Tuple ): lowerCamelCase_ : str =self.get_config() lowerCamelCase_ : List[Any] =self.prepare_autoformer_inputs_dict(snake_case__ ) return config, inputs_dict def UpperCAmelCase__ ( self : str ): lowerCamelCase_ , lowerCamelCase_ : List[str] =self.prepare_config_and_inputs() return config, inputs_dict def UpperCAmelCase__ ( self : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] ): lowerCamelCase_ : str =AutoformerModel(config=snake_case__ ).to(snake_case__ ).eval() lowerCamelCase_ : int =model(**snake_case__ ) lowerCamelCase_ : str =outputs.encoder_last_hidden_state lowerCamelCase_ : Optional[Any] =outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: lowerCamelCase_ : Tuple =model.get_encoder() encoder.save_pretrained(snake_case__ ) lowerCamelCase_ : Any =AutoformerEncoder.from_pretrained(snake_case__ ).to(snake_case__ ) lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Optional[Any] =model.create_network_inputs(**snake_case__ ) lowerCamelCase_ , lowerCamelCase_ : Optional[int] =model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] ) lowerCamelCase_ : Dict =torch.cat( (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , ) lowerCamelCase_ : int =encoder(inputs_embeds=snake_case__ )[0] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 ) lowerCamelCase_ : str =( torch.mean(transformer_inputs[:, : config.context_length, ...] 
, dim=1 ) .unsqueeze(1 ) .repeat(1 , config.prediction_length , 1 ) ) lowerCamelCase_ : Optional[int] =torch.zeros( [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , ) lowerCamelCase_ : Any =torch.cat( ( torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) lowerCamelCase_ : Optional[Any] =torch.cat( ( torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCamelCase_ : List[str] =model.get_decoder() decoder.save_pretrained(snake_case__ ) lowerCamelCase_ : str =AutoformerDecoder.from_pretrained(snake_case__ ).to(snake_case__ ) lowerCamelCase_ : List[str] =decoder( trend=snake_case__ , inputs_embeds=snake_case__ , encoder_hidden_states=snake_case__ , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 ) @require_torch class lowercase__ ( snake_case__, snake_case__, unittest.TestCase ): _UpperCAmelCase :Optional[int] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else () _UpperCAmelCase :Union[str, Any] = (AutoformerForPrediction,) if is_torch_available() else () _UpperCAmelCase :Optional[int] = {"feature-extraction": AutoformerModel} if is_torch_available() else {} _UpperCAmelCase :Tuple = False _UpperCAmelCase :int = False _UpperCAmelCase :int = False _UpperCAmelCase :Optional[int] = False _UpperCAmelCase :Optional[Any] = False _UpperCAmelCase :Dict = False def UpperCAmelCase__ ( self : Optional[int] ): lowerCamelCase_ : List[str] =AutoformerModelTester(self ) lowerCamelCase_ : List[str] =ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ ) def UpperCAmelCase__ ( self : Tuple ): self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : Union[str, Any] 
): lowerCamelCase_ , lowerCamelCase_ : str =self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: lowerCamelCase_ : List[Any] =model_class(snake_case__ ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(snake_case__ ) lowerCamelCase_ , lowerCamelCase_ : str =model_class.from_pretrained(snake_case__ , output_loading_info=snake_case__ ) self.assertEqual(info["missing_keys"] , [] ) def UpperCAmelCase__ ( self : Optional[Any] ): lowerCamelCase_ : List[str] =self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*snake_case__ ) @unittest.skip(reason="Model has no tokens embeddings" ) def UpperCAmelCase__ ( self : Optional[Any] ): pass def UpperCAmelCase__ ( self : Any ): lowerCamelCase_ : Any =inspect.signature(getattr(snake_case__ , "forward" ) ) # The main input is the name of the argument after `self` lowerCamelCase_ : Optional[Any] =list(model_signature.parameters.keys() )[1] self.assertEqual(AutoformerModel.main_input_name , snake_case__ ) def UpperCAmelCase__ ( self : List[str] ): lowerCamelCase_ , lowerCamelCase_ : Tuple =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ : Optional[int] =model_class(snake_case__ ) lowerCamelCase_ : Optional[int] =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase_ : Union[str, Any] =[*signature.parameters.keys()] lowerCamelCase_ : List[Any] =[ "past_values", "past_time_features", "past_observed_mask", "static_categorical_features", "static_real_features", "future_values", "future_time_features", ] if model.__class__.__name__ in ["AutoformerForPrediction"]: expected_arg_names.append("future_observed_mask" ) expected_arg_names.extend( [ "decoder_attention_mask", "head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs", "past_key_values", 
"output_hidden_states", "output_attentions", "use_cache", "return_dict", ] ) self.assertListEqual(arg_names[: len(snake_case__ )] , snake_case__ ) def UpperCAmelCase__ ( self : Tuple ): lowerCamelCase_ , lowerCamelCase_ : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ : Optional[int] =True lowerCamelCase_ : List[str] =getattr(self.model_tester , "seq_length" , snake_case__ ) lowerCamelCase_ : Dict =getattr(self.model_tester , "decoder_seq_length" , snake_case__ ) lowerCamelCase_ : List[Any] =getattr(self.model_tester , "encoder_seq_length" , snake_case__ ) lowerCamelCase_ : Optional[Any] =getattr(self.model_tester , "d_model" , snake_case__ ) lowerCamelCase_ : List[str] =getattr(self.model_tester , "num_attention_heads" , snake_case__ ) lowerCamelCase_ : Union[str, Any] =d_model // num_attention_heads for model_class in self.all_model_classes: lowerCamelCase_ : str =True lowerCamelCase_ : int =False lowerCamelCase_ : Any =True lowerCamelCase_ : Tuple =model_class(snake_case__ ) model.to(snake_case__ ) model.eval() with torch.no_grad(): lowerCamelCase_ : Union[str, Any] =model(**self._prepare_for_class(snake_case__ , snake_case__ ) ) lowerCamelCase_ : str =outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowerCamelCase_ : List[Any] =True lowerCamelCase_ : Optional[int] =model_class(snake_case__ ) model.to(snake_case__ ) model.eval() with torch.no_grad(): lowerCamelCase_ : List[str] =model(**self._prepare_for_class(snake_case__ , snake_case__ ) ) lowerCamelCase_ : Union[str, Any] =outputs.encoder_attentions self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) lowerCamelCase_ : 
Optional[Any] =len(snake_case__ ) lowerCamelCase_ : List[Any] =7 if "last_hidden_state" in outputs: correct_outlen += 1 if "trend" in outputs: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned if "loss" in outputs: correct_outlen += 1 if "params" in outputs: correct_outlen += 1 self.assertEqual(snake_case__ , snake_case__ ) # decoder attentions lowerCamelCase_ : Union[str, Any] =outputs.decoder_attentions self.assertIsInstance(snake_case__ , (list, tuple) ) self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # cross attentions lowerCamelCase_ : Tuple =outputs.cross_attentions self.assertIsInstance(snake_case__ , (list, tuple) ) self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # Check attention is always last and order is fine lowerCamelCase_ : Tuple =True lowerCamelCase_ : Optional[int] =True lowerCamelCase_ : Tuple =model_class(snake_case__ ) model.to(snake_case__ ) model.eval() with torch.no_grad(): lowerCamelCase_ : Dict =model(**self._prepare_for_class(snake_case__ , snake_case__ ) ) self.assertEqual(out_len + 2 , len(snake_case__ ) ) lowerCamelCase_ : Union[str, Any] =outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) @is_flaky() def UpperCAmelCase__ ( self : Optional[int] ): super().test_retain_grad_hidden_states_attentions() def _snake_case ( lowerCamelCase__ : Tuple="train-batch.pt" ) -> Any: lowerCamelCase_ : Tuple 
=hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=lowerCamelCase__ , repo_type="dataset" ) lowerCamelCase_ : List[Any] =torch.load(lowerCamelCase__ , map_location=lowerCamelCase__ ) return batch @require_torch @slow class lowercase__ ( unittest.TestCase ): def UpperCAmelCase__ ( self : Dict ): lowerCamelCase_ : int =AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case__ ) lowerCamelCase_ : List[str] =prepare_batch() with torch.no_grad(): lowerCamelCase_ : List[Any] =model( past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0] lowerCamelCase_ : Union[str, Any] =torch.Size( (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) ) self.assertEqual(output.shape , snake_case__ ) lowerCamelCase_ : Dict =torch.tensor( [[0.3_593, -1.3_398, 0.6_330], [0.2_279, 1.5_396, -0.1_792], [0.0_450, 1.3_225, -0.2_335]] , device=snake_case__ ) self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case__ , atol=snake_case__ ) ) def UpperCAmelCase__ ( self : Tuple ): lowerCamelCase_ : str =AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case__ ) lowerCamelCase_ : Optional[int] =prepare_batch("val-batch.pt" ) with torch.no_grad(): lowerCamelCase_ : Union[str, Any] =model( past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state lowerCamelCase_ : List[Any] =torch.Size((64, model.config.context_length, model.config.d_model) ) self.assertEqual(output.shape , snake_case__ ) lowerCamelCase_ : Optional[Any] =torch.tensor( [[-0.0_734, 
-0.9_036, 0.8_358], [4.7_186, 2.4_113, 1.9_581], [1.7_953, 2.3_558, 1.2_970]] , device=snake_case__ ) self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case__ , atol=snake_case__ ) ) def UpperCAmelCase__ ( self : List[str] ): lowerCamelCase_ : int =AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case__ ) lowerCamelCase_ : Dict =prepare_batch("val-batch.pt" ) with torch.no_grad(): lowerCamelCase_ : Union[str, Any] =model.generate( static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , ) lowerCamelCase_ : Tuple =torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) ) self.assertEqual(outputs.sequences.shape , snake_case__ ) lowerCamelCase_ : List[str] =torch.tensor([3_130.6_763, 4_056.5_293, 7_053.0_786] , device=snake_case__ ) lowerCamelCase_ : Any =outputs.sequences.mean(dim=1 ) self.assertTrue(torch.allclose(mean_prediction[0, -3:] , snake_case__ , rtol=1E-1 ) )
144
1
from math import ceil, sqrt


def lowerCamelCase__(A__: int = 1000000) -> int:
    """Project Euler 173: count square laminae using no more than ``A__`` tiles.

    A lamina is a hollow square: an outer square of side ``outer_width`` with a
    centred square hole.  It uses ``outer_width**2 - hole_width**2`` tiles, and
    the hole must share parity with the outer square (so it can be centred) and
    satisfy ``outer_width - hole_width >= 2``.

    :param A__: maximum number of tiles available (inclusive).
    :return: number of distinct laminae constructible with at most ``A__`` tiles.
    """
    # Anonymisation had broken the internal names (assignments went to
    # ``__lowerCamelCase`` while the code read ``limit``/``answer``/
    # ``hole_width_lower_bound``); restored here.  Keep the public parameter
    # name ``A__`` for backward compatibility and alias it internally.
    limit = A__
    answer = 0
    # The thinnest lamina of width w uses 4*(w - 1) tiles, hence w <= limit//4 + 1.
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            # Need outer**2 - hole**2 <= limit, so the hole edge must be at
            # least ceil(sqrt(outer**2 - limit)).
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # Hole and outer square must have the same parity to be centrable.
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        # Valid hole widths run from the lower bound to outer_width - 2 in steps of 2.
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer


if __name__ == "__main__":
    # The original guard called an undefined name ``solution``; call the
    # function actually defined above.
    print(f"""{lowerCamelCase__() = }""")
29
import os
from math import log10


def lowerCamelCase__(A__: str = "base_exp.txt") -> int:
    """Project Euler 99: find the line with the greatest ``base**exponent``.

    Each line of the data file holds ``base,exponent``.  Comparing the numbers
    directly would require huge-integer arithmetic, so we compare
    ``exponent * log10(base)`` instead, which is monotone in the true value.

    :param A__: path to the comma-separated data file.  A bare filename is
        resolved relative to the current working directory
        (``os.path.dirname`` of a bare name is ``""``).
    :return: 1-based line number of the largest base/exponent pair.
    """
    # Fixes over the corrupted original: ``from math import logaa`` is an
    # ImportError (restored to ``log10``); ``largest``/``result`` were never
    # bound; fields were mapped through the filename instead of ``int``; and
    # the file handle was never closed (now a ``with`` block).
    largest = 0.0
    result = 0
    with open(os.path.join(os.path.dirname(A__), A__)) as data_file:
        for i, line in enumerate(data_file):
            base, exponent = map(int, line.split(","))
            score = exponent * log10(base)
            if score > largest:
                largest = score
                result = i + 1  # line numbers are 1-based
    return result


if __name__ == "__main__":
    # The original guard called an undefined name ``solution``.
    print(lowerCamelCase__())
29
1
# Lazy-import scaffold for the MMBT model, following the standard Transformers
# pattern: declare an import-structure map, then replace this module with a
# _LazyModule proxy so heavy torch imports only happen on first attribute access.
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Map of submodule name -> public symbols it exports (always-available part).
__UpperCAmelCase = {"configuration_mmbt": ["MMBTConfig"]}

try:
    # Torch-backed symbols are only registered when torch is importable.
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): this rebinds the same anonymised name as the dict above.
    # Upstream, the dict is ``_import_structure`` and this branch *adds*
    # ``_import_structure["modeling_mmbt"] = [...]`` — the renaming collapsed
    # two distinct targets into one; confirm against the original module.
    __UpperCAmelCase = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]

if TYPE_CHECKING:
    # Static type checkers (and IDEs) see the real, eager imports.
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
    import sys

    # At runtime, swap this module in sys.modules for a lazy proxy.
    # NOTE(review): ``_import_structure`` is referenced here but never bound in
    # this file as written (its assignment was renamed above), so importing this
    # module would raise NameError — anonymisation residue, not original intent.
    __UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
299
from ..utils import DummyObject, requires_backends


class UpperCamelCase__(metaclass=__SCREAMING_SNAKE_CASE):
    """Placeholder ("dummy") object that raises a helpful ImportError whenever it
    is instantiated or accessed while the required backends are missing.

    NOTE(review): the metaclass name ``__SCREAMING_SNAKE_CASE`` is undefined in
    this file — presumably an anonymised rename of the imported ``DummyObject``;
    confirm against the upstream dummy-objects module.
    """

    # Backends that must be installed before the real class can be used.
    UpperCAmelCase_ = ["torch", "scipy"]

    def __init__(self, *_A, **_A) -> Tuple:
        # NOTE(review): ``*_A`` and ``**_A`` share one name, which is a
        # SyntaxError in Python — likely residue of ``*args, **kwargs``.
        # ``Tuple`` is also referenced without an import.
        requires_backends(self, ['''torch''', '''scipy'''])

    @classmethod
    def _UpperCamelCase(cls, *_A, **_A) -> Any:
        # Raises an informative ImportError via requires_backends when the
        # listed backends are absent.
        requires_backends(cls, ['''torch''', '''scipy'''])

    @classmethod
    def _UpperCamelCase(cls, *_A, **_A) -> Tuple:
        # NOTE(review): redefines ``_UpperCamelCase`` above, so only this
        # definition survives — the two distinct original classmethod names
        # were collapsed by the anonymisation pass.
        requires_backends(cls, ['''torch''', '''scipy'''])
299
1
'''Indexed min-heap keyed by a user-supplied scoring function.

NOTE(review): this chunk is anonymisation residue.  Every method was renamed to
the same identifier ``UpperCAmelCase_`` (so only the final definition would
survive on the class), most assignment *targets* were collapsed to ``A__`` while
the *references* kept their original local names (``i``, ``left``, ``right``,
``parent``, ``index``, ``item``, ``key`` ...), and several methods declare two
parameters both named ``lowercase_`` — a SyntaxError.  The comments below
record the evident original intent (an indexed heap with O(log n) insert,
extract, update and delete); the code as written does not run and must be
restored from the upstream source before use.
'''

from collections.abc import Callable


class UpperCAmelCase:
    def __init__(self: Tuple, lowercase_: Callable | None = None) -> None:
        # Stores actual heap items.
        A__ = []
        # Stores indexes of each item for supporting updates and deletion.
        A__ = {}
        # Stores current size of heap.
        A__ = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        # NOTE(review): ``key`` and ``x`` are unbound here — originally this was
        # ``self.key = key or (lambda x: x)``; the four assignments above were
        # ``self.arr`` / ``self.pos_map`` / ``self.size`` / ``self.key``.
        A__ = key or (lambda lowercase_: x)

    def UpperCAmelCase_(self: Tuple, lowercase_: int) -> int | None:
        # Parent index of node i, or None for the root.
        # NOTE(review): ``i`` is unbound — the parameter was renamed away.
        return int((i - 1) / 2) if i > 0 else None

    def UpperCAmelCase_(self: List[str], lowercase_: int) -> int | None:
        # Left child index (2i + 1) when it lies inside the heap, else None.
        A__ = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def UpperCAmelCase_(self: Tuple, lowercase_: int) -> int | None:
        # Right child index (2i + 2) when it lies inside the heap, else None.
        A__ = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def UpperCAmelCase_(self: int, lowercase_: int, lowercase_: int) -> None:
        # Swap nodes i and j, updating the position map first.
        # NOTE(review): ``A__, A__ = a, b`` assigns both values to one name and
        # does NOT swap — originally ``self.pos_map[...], self.pos_map[...] = ...``.
        A__, A__ = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        A__, A__ = self.arr[j], self.arr[i]

    def UpperCAmelCase_(self: List[Any], lowercase_: int, lowercase_: int) -> bool:
        # Min-heap ordering: True when node i's score is strictly below node j's.
        return self.arr[i][1] < self.arr[j][1]

    def UpperCAmelCase_(self: List[str], lowercase_: int) -> int:
        # Of {node, left child, right child}, return the index that may validly
        # sit at this position (the smallest score).
        A__ = self._left(lowercase_)
        A__ = self._right(lowercase_)
        A__ = i
        if left is not None and not self._cmp(lowercase_, lowercase_):
            A__ = left
        if right is not None and not self._cmp(lowercase_, lowercase_):
            A__ = right
        return valid_parent

    def UpperCAmelCase_(self: Any, lowercase_: int) -> None:
        # Bubble the node up while it is smaller than its parent.
        A__ = self._parent(lowercase_)
        while parent is not None and not self._cmp(lowercase_, lowercase_):
            self._swap(lowercase_, lowercase_)
            A__, A__ = parent, self._parent(lowercase_)

    def UpperCAmelCase_(self: Union[str, Any], lowercase_: int) -> None:
        # Sift the node down until heap order is restored below it.
        A__ = self._get_valid_parent(lowercase_)
        while valid_parent != index:
            self._swap(lowercase_, lowercase_)
            A__, A__ = valid_parent, self._get_valid_parent(lowercase_)

    def UpperCAmelCase_(self: Optional[int], lowercase_: int, lowercase_: int) -> None:
        # Update an existing item's stored value and re-score it.
        if item not in self.pos_map:
            return
        A__ = self.pos_map[item]
        A__ = [item, self.key(lowercase_)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(lowercase_)
        self._heapify_down(lowercase_)

    def UpperCAmelCase_(self: Union[str, Any], lowercase_: int) -> None:
        # Delete an item: move the last node into its slot, shrink, re-heapify.
        if item not in self.pos_map:
            return
        A__ = self.pos_map[item]
        del self.pos_map[item]
        A__ = self.arr[self.size - 1]
        A__ = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(lowercase_)
            self._heapify_down(lowercase_)

    def UpperCAmelCase_(self: Tuple, lowercase_: int, lowercase_: int) -> None:
        # Insert a new [item, score] pair, reusing a free slot when available.
        A__ = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(lowercase_)])
        else:
            A__ = [item, self.key(lowercase_)]
        A__ = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def UpperCAmelCase_(self: List[Any]) -> tuple | None:
        # Peek at the minimum [item, score] pair without removing it.
        return self.arr[0] if self.size else None

    def UpperCAmelCase_(self: List[str]) -> tuple | None:
        # Pop and return the minimum [item, score] pair, or None when empty.
        A__ = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def UpperCamelCase():
    # Placeholder hook (originally a doctest-bearing demo function).
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
123
"""Project Euler 19: Sundays falling on the first of the month, 1901-2000."""


def UpperCamelCase() -> int:
    """Count Sundays that fell on the first of a month in the 20th century.

    Walks the calendar one week at a time (1 Jan 1901 was a Tuesday, so
    6 Jan 1901 is the first Sunday), rolling ``day``/``month``/``year`` by
    hand with leap years accounted for, and counts landings on the 1st.

    :return: number of month-starting Sundays between 1 Jan 1901 and
        31 Dec 2000 (171).
    """
    # The anonymised original assigned every local to ``A__`` while the loop
    # read ``year``/``day``/``month``/``sundays`` — restored here.
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6  # 6 Jan 1901: the first Sunday of the period
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7  # step a full week; only Sundays are ever visited
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            # Leap year: February has 29 days.
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    # The original guard called an undefined name ``solution``.
    print(UpperCamelCase())
123
1
'''simple docstring''' import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCamelCase ( __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] ) ->Union[str, Any]: assert isinstance(__lowerCamelCase , __lowerCamelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCamelCase ( __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] ) ->Optional[int]: _SCREAMING_SNAKE_CASE = tmp_path / """cache""" _SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _SCREAMING_SNAKE_CASE = JsonDatasetReader(__lowerCamelCase , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase ).read() _check_json_dataset(__lowerCamelCase , __lowerCamelCase ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def lowerCamelCase ( __lowerCamelCase : Dict , __lowerCamelCase : str , __lowerCamelCase : str ) ->Dict: _SCREAMING_SNAKE_CASE = tmp_path / """cache""" _SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE = 
features.copy() if features else default_expected_features _SCREAMING_SNAKE_CASE = ( Features({feature: Value(__lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) _SCREAMING_SNAKE_CASE = JsonDatasetReader(__lowerCamelCase , features=__lowerCamelCase , cache_dir=__lowerCamelCase ).read() _check_json_dataset(__lowerCamelCase , __lowerCamelCase ) @pytest.mark.parametrize( """features""" , [ None, {"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""}, ] , ) def lowerCamelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict ) ->Union[str, Any]: _SCREAMING_SNAKE_CASE = tmp_path / """cache""" _SCREAMING_SNAKE_CASE = {"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""} _SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features _SCREAMING_SNAKE_CASE = ( Features({feature: Value(__lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) _SCREAMING_SNAKE_CASE = JsonDatasetReader(__lowerCamelCase , features=__lowerCamelCase , cache_dir=__lowerCamelCase ).read() assert isinstance(__lowerCamelCase , __lowerCamelCase ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def lowerCamelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple ) ->Tuple: # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"} _SCREAMING_SNAKE_CASE = {"""col_2""": """int64""", """col_3""": """float64""", """col_1""": """string"""} _SCREAMING_SNAKE_CASE = features.copy() _SCREAMING_SNAKE_CASE = ( Features({feature: Value(__lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) _SCREAMING_SNAKE_CASE = tmp_path / """cache""" 
_SCREAMING_SNAKE_CASE = JsonDatasetReader(__lowerCamelCase , features=__lowerCamelCase , cache_dir=__lowerCamelCase ).read() assert isinstance(__lowerCamelCase , __lowerCamelCase ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCamelCase ( __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] ) ->Dict: _SCREAMING_SNAKE_CASE = tmp_path / """cache""" _SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE = JsonDatasetReader(__lowerCamelCase , cache_dir=__lowerCamelCase , split=__lowerCamelCase ).read() _check_json_dataset(__lowerCamelCase , __lowerCamelCase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def lowerCamelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str ) ->Any: if issubclass(__lowerCamelCase , __lowerCamelCase ): _SCREAMING_SNAKE_CASE = jsonl_path elif issubclass(__lowerCamelCase , __lowerCamelCase ): _SCREAMING_SNAKE_CASE = [jsonl_path] _SCREAMING_SNAKE_CASE = tmp_path / """cache""" _SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE = JsonDatasetReader(__lowerCamelCase , cache_dir=__lowerCamelCase ).read() _check_json_dataset(__lowerCamelCase , __lowerCamelCase ) def lowerCamelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : int=("train",) ) ->Optional[int]: assert isinstance(__lowerCamelCase , __lowerCamelCase ) for split in splits: _SCREAMING_SNAKE_CASE = dataset_dict[split] assert dataset.num_rows == 4 assert 
dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCamelCase ( __lowerCamelCase : str , __lowerCamelCase : Any , __lowerCamelCase : int ) ->List[Any]: _SCREAMING_SNAKE_CASE = tmp_path / """cache""" _SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _SCREAMING_SNAKE_CASE = JsonDatasetReader({"""train""": jsonl_path} , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase ).read() _check_json_datasetdict(__lowerCamelCase , __lowerCamelCase ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def lowerCamelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any ) ->str: _SCREAMING_SNAKE_CASE = tmp_path / """cache""" _SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features _SCREAMING_SNAKE_CASE = ( Features({feature: Value(__lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) _SCREAMING_SNAKE_CASE = JsonDatasetReader({"""train""": jsonl_path} , features=__lowerCamelCase , cache_dir=__lowerCamelCase ).read() _check_json_datasetdict(__lowerCamelCase , __lowerCamelCase ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", 
"""test"""] ) def lowerCamelCase ( __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : int ) ->int: if split: _SCREAMING_SNAKE_CASE = {split: jsonl_path} else: _SCREAMING_SNAKE_CASE = """train""" _SCREAMING_SNAKE_CASE = {"""train""": jsonl_path, """test""": jsonl_path} _SCREAMING_SNAKE_CASE = tmp_path / """cache""" _SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE = JsonDatasetReader(__lowerCamelCase , cache_dir=__lowerCamelCase ).read() _check_json_datasetdict(__lowerCamelCase , __lowerCamelCase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCamelCase ( __lowerCamelCase : List[str] ) ->Optional[Any]: return json.load(__lowerCamelCase ) def lowerCamelCase ( __lowerCamelCase : int ) ->str: return [json.loads(__lowerCamelCase ) for line in buffer] class a_ : '''simple docstring''' @pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] ) def snake_case_( self , A , A , A ) -> Dict: with io.BytesIO() as buffer: JsonDatasetWriter(A , A , lines=A ).write() buffer.seek(0 ) _SCREAMING_SNAKE_CASE = load_json_function(A ) assert isinstance(A , A ) assert isinstance(exported_content[0] , A ) assert len(A ) == 10 @pytest.mark.parametrize( """orient, container, keys, len_at""" , [ ("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None), ("""split""", dict, {"""columns""", """data"""}, """data"""), ("""index""", dict, set("""0123456789""" ), None), ("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""), ("""values""", list, None, None), ("""table""", dict, {"""schema""", """data"""}, """data"""), ] , ) def snake_case_( self , A , A , A , A , A ) -> Union[str, Any]: with io.BytesIO() as buffer: JsonDatasetWriter(A , A , lines=A , orient=A ).write() buffer.seek(0 ) _SCREAMING_SNAKE_CASE = load_json(A ) assert 
isinstance(A , A ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(A , """keys""" ) and not hasattr(exported_content[0] , """keys""" ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(A ) == 10 @pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] ) def snake_case_( self , A , A , A ) -> Tuple: with io.BytesIO() as buffer: JsonDatasetWriter(A , A , lines=A , num_proc=2 ).write() buffer.seek(0 ) _SCREAMING_SNAKE_CASE = load_json_function(A ) assert isinstance(A , A ) assert isinstance(exported_content[0] , A ) assert len(A ) == 10 @pytest.mark.parametrize( """orient, container, keys, len_at""" , [ ("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None), ("""split""", dict, {"""columns""", """data"""}, """data"""), ("""index""", dict, set("""0123456789""" ), None), ("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""), ("""values""", list, None, None), ("""table""", dict, {"""schema""", """data"""}, """data"""), ] , ) def snake_case_( self , A , A , A , A , A ) -> List[Any]: with io.BytesIO() as buffer: JsonDatasetWriter(A , A , lines=A , orient=A , num_proc=2 ).write() buffer.seek(0 ) _SCREAMING_SNAKE_CASE = load_json(A ) assert isinstance(A , A ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(A , """keys""" ) and not hasattr(exported_content[0] , """keys""" ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(A ) == 10 def snake_case_( self , A ) -> int: with pytest.raises(A ): with io.BytesIO() as buffer: JsonDatasetWriter(A , A , num_proc=0 ) @pytest.mark.parametrize("""compression, extension""" , [("""gzip""", """gz"""), ("""bz2""", """bz2"""), ("""xz""", """xz""")] ) def snake_case_( self , A , A , A , A , A ) -> Any: 
_SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp("""data""" ) / f'test.json.{extension}' _SCREAMING_SNAKE_CASE = str(shared_datadir / f'test_file.json.{extension}' ) JsonDatasetWriter(A , A , compression=A ).write() with fsspec.open(A , """rb""" , compression="""infer""" ) as f: _SCREAMING_SNAKE_CASE = f.read() with fsspec.open(A , """rb""" , compression="""infer""" ) as f: _SCREAMING_SNAKE_CASE = f.read() assert exported_content == original_content
58
# Convert DeiT checkpoints from the timm library to the HuggingFace format.
#
# NOTE(review): identifiers in this copy look machine-mangled — every function is
# named `a__`, every local is `SCREAMING_SNAKE_CASE_`, and several `def` headers
# repeat the same parameter name (a SyntaxError in Python). Assignment targets
# such as state_dict keys appear to have been overwritten by the mangling, so the
# file cannot run as-is. Comments below describe the apparent intent only.
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging

logging.set_verbosity_info()
A : str = logging.get_logger(__name__)


def a__(__UpperCamelCase, __UpperCamelCase=False):
    """Build the list of (timm_name, hf_name) weight-key pairs to rename.

    Apparent parameters (names collapsed by mangling): a DeiT config and a
    `base_model` flag. Returns the rename list.
    """
    SCREAMING_SNAKE_CASE_ = []  # NOTE(review): presumably `rename_keys`, used below
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight'''))
        rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias'''))
        rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight'''))
        rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias'''))
        rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight'''))
        rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias'''))
        rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight'''))
        rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias'''))
        rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight'''))
        rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias'''))
    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ]
    )
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        SCREAMING_SNAKE_CASE_ = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ]
        )
    return rename_keys


def a__(__UpperCamelCase, __UpperCamelCase, __UpperCamelCase=False):
    """Split timm's fused qkv projection into separate q/k/v tensors.

    NOTE(review): the original assignment targets (state_dict key writes) were
    lost in the mangling — the slices below are computed but never stored.
    """
    for i in range(config.num_hidden_layers):
        if base_model:
            SCREAMING_SNAKE_CASE_ = ""  # no prefix when exporting the bare backbone
        else:
            SCREAMING_SNAKE_CASE_ = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        SCREAMING_SNAKE_CASE_ = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''')
        SCREAMING_SNAKE_CASE_ = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''')
        # next, add query, keys and values (in that order) to the state dict
        SCREAMING_SNAKE_CASE_ = in_proj_weight[: config.hidden_size, :]
        SCREAMING_SNAKE_CASE_ = in_proj_bias[: config.hidden_size]
        SCREAMING_SNAKE_CASE_ = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        SCREAMING_SNAKE_CASE_ = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        SCREAMING_SNAKE_CASE_ = in_proj_weight[-config.hidden_size :, :]
        SCREAMING_SNAKE_CASE_ = in_proj_bias[-config.hidden_size :]


def a__(__UpperCamelCase, __UpperCamelCase, __UpperCamelCase):
    """Rename one key in a dict: pop the old key, re-insert under the new one."""
    SCREAMING_SNAKE_CASE_ = dct.pop(__UpperCamelCase)
    SCREAMING_SNAKE_CASE_ = val


def a__():
    """Download the standard COCO cats test image used for output verification."""
    SCREAMING_SNAKE_CASE_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
    SCREAMING_SNAKE_CASE_ = Image.open(requests.get(__UpperCamelCase, stream=__UpperCamelCase).raw)
    return im


@torch.no_grad()
def a__(__UpperCamelCase, __UpperCamelCase):
    """Convert a named timm DeiT checkpoint, verify logits, and save to disk."""
    SCREAMING_SNAKE_CASE_ = DeiTConfig()
    # all deit models have fine-tuned heads
    SCREAMING_SNAKE_CASE_ = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    SCREAMING_SNAKE_CASE_ = 1_0_0_0
    SCREAMING_SNAKE_CASE_ = "huggingface/label-files"
    SCREAMING_SNAKE_CASE_ = "imagenet-1k-id2label.json"
    SCREAMING_SNAKE_CASE_ = json.load(open(hf_hub_download(__UpperCamelCase, __UpperCamelCase, repo_type="dataset"), "r"))
    SCREAMING_SNAKE_CASE_ = {int(__UpperCamelCase): v for k, v in idalabel.items()}
    SCREAMING_SNAKE_CASE_ = idalabel
    SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()}
    # patch size and image size are encoded in the checkpoint name suffix
    SCREAMING_SNAKE_CASE_ = int(deit_name[-6:-4])
    SCREAMING_SNAKE_CASE_ = int(deit_name[-3:])
    # size of the architecture (hidden size, intermediate size, layers, heads)
    if deit_name[9:].startswith("tiny"):
        SCREAMING_SNAKE_CASE_ = 1_9_2
        SCREAMING_SNAKE_CASE_ = 7_6_8
        SCREAMING_SNAKE_CASE_ = 1_2
        SCREAMING_SNAKE_CASE_ = 3
    elif deit_name[9:].startswith("small"):
        SCREAMING_SNAKE_CASE_ = 3_8_4
        SCREAMING_SNAKE_CASE_ = 1_5_3_6
        SCREAMING_SNAKE_CASE_ = 1_2
        SCREAMING_SNAKE_CASE_ = 6
    if deit_name[9:].startswith("base"):
        pass  # config defaults already match the base architecture
    elif deit_name[4:].startswith("large"):
        SCREAMING_SNAKE_CASE_ = 1_0_2_4
        SCREAMING_SNAKE_CASE_ = 4_0_9_6
        SCREAMING_SNAKE_CASE_ = 2_4
        SCREAMING_SNAKE_CASE_ = 1_6
    # load original model from timm
    SCREAMING_SNAKE_CASE_ = timm.create_model(__UpperCamelCase, pretrained=__UpperCamelCase)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    SCREAMING_SNAKE_CASE_ = timm_model.state_dict()
    SCREAMING_SNAKE_CASE_ = create_rename_keys(__UpperCamelCase, __UpperCamelCase)
    for src, dest in rename_keys:
        rename_key(__UpperCamelCase, __UpperCamelCase, __UpperCamelCase)
    read_in_q_k_v(__UpperCamelCase, __UpperCamelCase, __UpperCamelCase)
    # load HuggingFace model
    SCREAMING_SNAKE_CASE_ = DeiTForImageClassificationWithTeacher(__UpperCamelCase).eval()
    model.load_state_dict(__UpperCamelCase)
    # Check outputs on an image, prepared by DeiTImageProcessor
    # to maintain same ratio w.r.t. 224 images, see
    # https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    SCREAMING_SNAKE_CASE_ = int((2_5_6 / 2_2_4) * config.image_size)
    SCREAMING_SNAKE_CASE_ = DeiTImageProcessor(size=__UpperCamelCase, crop_size=config.image_size)
    SCREAMING_SNAKE_CASE_ = image_processor(images=prepare_img(), return_tensors="pt")
    SCREAMING_SNAKE_CASE_ = encoding["pixel_values"]
    SCREAMING_SNAKE_CASE_ = model(__UpperCamelCase)
    SCREAMING_SNAKE_CASE_ = timm_model(__UpperCamelCase)
    # the HF model must reproduce the timm logits to within 1e-3
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(__UpperCamelCase, outputs.logits, atol=1E-3)
    Path(__UpperCamelCase).mkdir(exist_ok=__UpperCamelCase)
    print(F'''Saving model {deit_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(__UpperCamelCase)
    print(F'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(__UpperCamelCase)


if __name__ == "__main__":
    A : Tuple = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--deit_name",
        default="vit_deit_base_distilled_patch16_224",
        type=str,
        help="Name of the DeiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    A : Dict = parser.parse_args()
    # NOTE(review): `convert_deit_checkpoint` (and `parser`/`args`) are not
    # defined under these names above — another symptom of the mangling.
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
118
0
"""Shard a Switch-Transformers T5X/flax checkpoint into PyTorch weight files on the fly.

NOTE(review): identifiers in this copy look machine-mangled — every function is
named `lowerCamelCase__`, every local is `UpperCAmelCase`, and `def` headers
repeat the parameter name `A` (a SyntaxError in Python). Assignment targets for
dict writes were lost in the mangling, so the file cannot run as-is. Comments
describe the apparent intent only.
"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
    rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int


def lowerCamelCase__(A: List[str], A: Union[str, Any]):
    """Map a flax key tuple + tensor to the equivalent PyTorch name/layout.

    Expert `kernel` tensors (ndim == 3) are permuted, plain linear `kernel`
    tensors are transposed, and `scale`/`embedding` become `weight`.
    """
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        UpperCAmelCase = flax_key_tuple[:-1] + ('''weight''',)
        UpperCAmelCase = torch.permute(A, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(A):
        # linear layer; NOTE(review): condition looks garbled — a substring
        # membership test (e.g. `"linear" in ".".join(...)`) seems intended.
        UpperCAmelCase = flax_key_tuple[:-1] + ('''weight''',)
        UpperCAmelCase = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        UpperCAmelCase = flax_key_tuple[:-1] + ('''weight''',)
    return flax_key_tuple, flax_tensor


def lowerCamelCase__(A: str, A: Tuple, A: Any):
    """Resolve one flattened checkpoint entry into (real_layer_name, split_key, content).

    Handles the three entry flavours: `metadata`, `kvstore`, and plain keys;
    rewrites `kvstore/path` entries into absolute file paths.
    """
    if "metadata" in layer:
        UpperCAmelCase = layer.split('''metadata''')
        UpperCAmelCase = ''''''.join(split_layer[0])[:-1]
        UpperCAmelCase = [tuple(('''metadata''' + split_layer[1]).split('''/'''))]
    elif "kvstore" in layer:
        UpperCAmelCase = layer.split('''kvstore''')
        UpperCAmelCase = ''''''.join(split_layer[0])[:-1]
        UpperCAmelCase = [tuple(('''kvstore''' + split_layer[1]).split('''/'''))]
    else:
        UpperCAmelCase = layer.split('''/''')
        UpperCAmelCase = '''/'''.join(split_layer[:-1])
        UpperCAmelCase = (split_layer[-1],)
    if "kvstore/path" in layer:
        UpperCAmelCase = f"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
    elif "kvstore/driver" in layer:
        UpperCAmelCase = '''file'''
    else:
        UpperCAmelCase = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content


def lowerCamelCase__(A: Any, A: Dict):
    """Rename all keys in a weight block and torch.save it to the given path."""
    UpperCAmelCase = rename_keys(A)
    UpperCAmelCase = {}
    for k, v in current_block.items():
        UpperCAmelCase = v
    UpperCAmelCase = new_current_block
    torch.save(A, A)


def lowerCamelCase__(A: int, A: Optional[Any], A: int, A: Optional[Any], A: str = WEIGHTS_NAME):
    """Stream the T5X checkpoint and write <= max_shard_size PyTorch shards.

    Returns (metadata, index) in the HF sharded-checkpoint format, or
    ({weights_name: keys}, None) when everything fits in a single shard.
    """
    UpperCAmelCase = convert_file_size_to_int(A)
    UpperCAmelCase = []
    UpperCAmelCase = {}
    UpperCAmelCase = 0
    UpperCAmelCase = 0
    os.makedirs(A, exist_ok=A)
    with gfile.GFile(switch_checkpoint_path + '''/checkpoint''', '''rb''') as fp:
        UpperCAmelCase = serialization.msgpack_restore(fp.read())['''optimizer''']['''target''']
        UpperCAmelCase = flatten_dict(A, sep='''/''')
    UpperCAmelCase = {}
    # group flattened entries per real layer name
    for layer in checkpoint_info.keys():
        UpperCAmelCase, UpperCAmelCase, UpperCAmelCase = get_key_and_tensorstore_dict(A, A, A)
        if curr_real_layer_name in all_layers:
            UpperCAmelCase = content
        else:
            UpperCAmelCase = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        UpperCAmelCase = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        UpperCAmelCase = torch.tensor(A)
        UpperCAmelCase = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)
        # use the renaming pattern from the small conversion scripts
        UpperCAmelCase, UpperCAmelCase = rename_base_flax_keys(tuple(key.split('''/''')), A)
        UpperCAmelCase = '''/'''.join(A)
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            UpperCAmelCase = os.path.join(A, weights_name.replace('''.bin''', f"""-{len(A )+1:05d}-of-???.bin"""))
            rename_and_save_block(A, A)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            UpperCAmelCase = {}
            UpperCAmelCase = 0
        # cast to the requested dtype before accumulating into the block
        UpperCAmelCase = raw_weights.to(getattr(A, A))
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    UpperCAmelCase = os.path.join(A, weights_name.replace('''.bin''', f"""-{len(A )+1:05d}-of-???.bin"""))
    rename_and_save_block(A, A)
    sharded_state_dicts.append(current_block.keys())
    # If we only have one shard, we return it
    if len(A) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    UpperCAmelCase = {}
    UpperCAmelCase = {}
    for idx, shard in enumerate(A):
        # now that the shard count is known, rename the ???-of files
        UpperCAmelCase = weights_name.replace('''.bin''', f"""-{idx+1:05d}-of-{len(A ):05d}.bin""")
        UpperCAmelCase = os.path.join(A, weights_name.replace('''.bin''', f"""-{idx+1:05d}-of-???.bin"""))
        os.rename(A, os.path.join(A, A))
        UpperCAmelCase = shard
        for key in shard:
            UpperCAmelCase = shard_file
    # Add the metadata
    UpperCAmelCase = {'''total_size''': total_size}
    UpperCAmelCase = {'''metadata''': metadata, '''weight_map''': weight_map}
    with open(os.path.join(A, A), '''w''', encoding='''utf-8''') as f:
        UpperCAmelCase = json.dumps(A, indent=2, sort_keys=A) + '''\n'''
        f.write(A)
    return metadata, index


if __name__ == "__main__":
    _lowercase : Union[str, Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--switch_t5x_checkpoint_path""",
        default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""",
        type=str,
        required=False,
        help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
    )
    parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""")
    parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""")
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""",
        type=str,
        required=False,
        help="""Path to the output pytorch model.""",
    )
    _lowercase : List[str] = parser.parse_args()
    # NOTE(review): argparse exposes this option as `args.switch_t5x_checkpoint_path`;
    # `switch_tax_checkpoint_path` would raise AttributeError — confirm and fix.
    shard_on_the_fly(
        args.switch_tax_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )


def lowerCamelCase__():
    """Manual sanity check: load switch-base-8, round-trip it, and generate text."""
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer

    UpperCAmelCase = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''')
    config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''')
    UpperCAmelCase = SwitchTransformersForConditionalGeneration.from_pretrained(
        '''/home/arthur_huggingface_co/transformers/switch_converted''', device_map='''auto'''
    )
    UpperCAmelCase = TaTokenizer.from_pretrained('''t5-small''')
    UpperCAmelCase = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'''
    UpperCAmelCase = tokenizer(A, return_tensors='''pt''').input_ids
    UpperCAmelCase = model.generate(A, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
91
'''simple docstring''' import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class UpperCamelCase__: def __init__( self : List[str] , lowerCAmelCase : int , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : int="resnet50" , lowerCAmelCase : Optional[int]=3 , lowerCAmelCase : Union[str, Any]=32 , lowerCAmelCase : List[Any]=3 , lowerCAmelCase : Tuple=True , lowerCAmelCase : str=True , )-> Union[str, Any]: """simple docstring""" UpperCAmelCase = parent UpperCAmelCase = out_indices if out_indices is not None else [4] UpperCAmelCase = stage_names UpperCAmelCase = out_features UpperCAmelCase = backbone UpperCAmelCase = batch_size UpperCAmelCase = image_size UpperCAmelCase = num_channels UpperCAmelCase = use_pretrained_backbone UpperCAmelCase = is_training def a__( self : Optional[Any] )-> Optional[int]: """simple docstring""" UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase = self.get_config() return config, pixel_values def a__( self : int )-> Optional[Any]: """simple docstring""" return TimmBackboneConfig( image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , ) def a__( self : Dict , 
lowerCAmelCase : str , lowerCAmelCase : Dict )-> Tuple: """simple docstring""" UpperCAmelCase = TimmBackbone(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() with torch.no_grad(): UpperCAmelCase = model(lowerCAmelCase ) self.parent.assertEqual( result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , ) def a__( self : str )-> Optional[Any]: """simple docstring""" UpperCAmelCase = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase = config_and_inputs UpperCAmelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch @require_timm class UpperCamelCase__( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , unittest.TestCase ): __magic_name__ : List[str] = (TimmBackbone,) if is_torch_available() else () __magic_name__ : Any = {"feature-extraction": TimmBackbone} if is_torch_available() else {} __magic_name__ : Union[str, Any] = False __magic_name__ : int = False __magic_name__ : Tuple = False __magic_name__ : List[str] = False def a__( self : int )-> str: """simple docstring""" UpperCAmelCase = TimmBackboneModelTester(self ) UpperCAmelCase = ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase ) def a__( self : List[str] )-> Dict: """simple docstring""" self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a__( self : List[Any] )-> Optional[Any]: """simple docstring""" UpperCAmelCase = '''resnet18''' UpperCAmelCase = '''microsoft/resnet-18''' UpperCAmelCase = AutoBackbone.from_pretrained(lowerCAmelCase , use_timm_backbone=lowerCAmelCase ) UpperCAmelCase = AutoBackbone.from_pretrained(lowerCAmelCase ) self.assertEqual(len(timm_model.out_features ) , 
len(transformers_model.out_features ) ) self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices , (-1,) ) self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] ) UpperCAmelCase = AutoBackbone.from_pretrained(lowerCAmelCase , use_timm_backbone=lowerCAmelCase , out_indices=[1, 2, 3] ) UpperCAmelCase = AutoBackbone.from_pretrained(lowerCAmelCase , out_indices=[1, 2, 3] ) self.assertEqual(timm_model.out_indices , transformers_model.out_indices ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) @unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' ) def a__( self : Union[str, Any] )-> Tuple: """simple docstring""" pass @unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' ) def a__( self : Dict )-> Dict: """simple docstring""" pass @unittest.skip('''TimmBackbone initialization is managed on the timm side''' ) def a__( self : Tuple )-> List[Any]: """simple docstring""" pass @unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' ) def a__( self : List[str] )-> Optional[Any]: """simple docstring""" pass @unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' ) def a__( self : Optional[Any] )-> Optional[Any]: """simple docstring""" pass @unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' ) def a__( self : Any )-> Optional[int]: """simple docstring""" pass @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' ) def a__( 
self : Optional[int] )-> Optional[int]: """simple docstring""" pass @unittest.skip('''model weights aren\'t tied in TimmBackbone.''' ) def a__( self : str )-> str: """simple docstring""" pass @unittest.skip('''model weights aren\'t tied in TimmBackbone.''' ) def a__( self : Any )-> int: """simple docstring""" pass @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' ) def a__( self : List[Any] )-> int: """simple docstring""" pass @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' ) def a__( self : List[Any] )-> List[str]: """simple docstring""" pass @unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' ) def a__( self : List[str] )-> Dict: """simple docstring""" pass @unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' ) def a__( self : str )-> Tuple: """simple docstring""" pass @unittest.skip('''Safetensors is not supported by timm.''' ) def a__( self : List[Any] )-> List[Any]: """simple docstring""" pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def a__( self : int )-> Tuple: """simple docstring""" pass def a__( self : Dict )-> Union[str, Any]: """simple docstring""" UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase = model_class(lowerCAmelCase ) UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase = [*signature.parameters.keys()] UpperCAmelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowerCAmelCase ) def a__( self : Union[str, Any] )-> Optional[Any]: """simple docstring""" UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase = True UpperCAmelCase = self.has_attentions # no need to test all models as different heads yield the same functionality 
UpperCAmelCase = self.all_model_classes[0] UpperCAmelCase = model_class(lowerCAmelCase ) model.to(lowerCAmelCase ) UpperCAmelCase = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) UpperCAmelCase = model(**lowerCAmelCase ) UpperCAmelCase = outputs[0][-1] # Encoder-/Decoder-only models UpperCAmelCase = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: UpperCAmelCase = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=lowerCAmelCase ) self.assertIsNotNone(hidden_states.grad ) if self.has_attentions: self.assertIsNotNone(attentions.grad ) def a__( self : Optional[int] )-> Optional[Any]: """simple docstring""" UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase = model_class(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() UpperCAmelCase = model(**lowerCAmelCase ) self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) ) self.assertEqual(len(model.channels ) , len(config.out_indices ) ) # Check output of last stage is taken if out_features=None, out_indices=None UpperCAmelCase = copy.deepcopy(lowerCAmelCase ) UpperCAmelCase = None UpperCAmelCase = model_class(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() UpperCAmelCase = model(**lowerCAmelCase ) self.assertEqual(len(result.feature_maps ) , 1 ) self.assertEqual(len(model.channels ) , 1 ) # Check backbone can be initialized with fresh weights UpperCAmelCase = copy.deepcopy(lowerCAmelCase ) UpperCAmelCase = False UpperCAmelCase = model_class(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() UpperCAmelCase = model(**lowerCAmelCase )
91
1
"""simple docstring""" import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, Features, Value from .base import TaskTemplate @dataclass(frozen=A__ ) class lowerCAmelCase_ ( A__ ): '''simple docstring''' _snake_case = field(default='''automatic-speech-recognition''' , metadata={'''include_in_asdict_even_if_is_default''': True} ) _snake_case = Features({'''audio''': Audio()} ) _snake_case = Features({'''transcription''': Value('''string''' )} ) _snake_case = "audio" _snake_case = "transcription" def A__ ( self , snake_case_ ) -> Optional[int]: if self.audio_column not in features: raise ValueError(f"""Column {self.audio_column} is not present in features.""" ) if not isinstance(features[self.audio_column] , snake_case_ ): raise ValueError(f"""Column {self.audio_column} is not an Audio type.""" ) __lowerCAmelCase = copy.deepcopy(self ) __lowerCAmelCase = self.input_schema.copy() __lowerCAmelCase = features[self.audio_column] __lowerCAmelCase = input_schema return task_template @property def A__ ( self ) -> Dict[str, str]: return {self.audio_column: "audio", self.transcription_column: "transcription"}
301
"""simple docstring""" from __future__ import annotations def lowercase (_lowerCAmelCase , _lowerCAmelCase ): __lowerCAmelCase = [] create_all_state(1 , _lowerCAmelCase , _lowerCAmelCase , [] , _lowerCAmelCase ) return result def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ): if level == 0: total_list.append(current_list[:] ) return for i in range(_lowerCAmelCase , total_number - level + 2 ): current_list.append(_lowerCAmelCase ) create_all_state(i + 1 , _lowerCAmelCase , level - 1 , _lowerCAmelCase , _lowerCAmelCase ) current_list.pop() def lowercase (_lowerCAmelCase ): for i in total_list: print(*_lowerCAmelCase ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_ = 4 SCREAMING_SNAKE_CASE_ = 2 SCREAMING_SNAKE_CASE_ = generate_all_combinations(n, k) print_all_state(total_list)
301
1
"""Longest non-decreasing subsequence via a recursive pivot strategy."""
from __future__ import annotations


def A(_UpperCAmelCase: list[int]) -> list[int]:  # This function is recursive
    """Return a longest non-decreasing subsequence of the input list.

    Fix: the mangled original collapsed every local to one name and recursed
    through an undefined name (`longest_subsequence`), so it could not run.
    The classic algorithm is restored; the public name and parameter are kept.

    Strategy: either the answer starts at the first element (pivot) and keeps
    only elements >= pivot, or it starts at the first element smaller than the
    pivot; recurse on both candidates and keep the longer.
    """
    array = _UpperCAmelCase
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop
    # condition of recursion)
    if array_length <= 1:
        return array

    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    # candidate 1: drop the pivot, start at the first smaller element
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = A(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    # candidate 2: keep the pivot and everything >= it
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *A(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    return longest_subseq


if __name__ == "__main__":
    import doctest

    doctest.testmod()
290
"""Find a triplet summing to a target: brute force vs. two-pointer, with timing."""
from __future__ import annotations

from itertools import permutations
from random import randint
from timeit import repeat


def make_dataset() -> tuple[list[int], int]:
    """Return a random 10-element array and a random target sum.

    Fix: the mangled original named every function identically and repeated
    parameter names (a SyntaxError); names are restored from the call sites
    and the timeit setup string below.
    """
    arr = [randint(-1_000, 1_000) for i in range(10)]
    r = randint(-5_000, 5_000)
    return (arr, r)


# module-level fixture shared with the timeit snippets (imported as `dataset`)
dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Naive O(n^3): try every 3-permutation; (0, 0, 0) when no triplet matches."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Optimized O(n^2): sort, then sweep with two pointers.

    NOTE: sorts `arr` in place, as the original did.
    """
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    """Benchmark both implementations on the shared dataset; return best-of-5 times."""
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10_000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10_000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"""The time for naive implementation is {times[0]}.""")
    print(f"""The time for optimized implementation is {times[1]}.""")
290
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging a :Optional[int] = logging.get_logger(__name__) a :int = { "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json", "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json", } class __a (UpperCamelCase_): '''simple docstring''' _SCREAMING_SNAKE_CASE :str = """falcon""" _SCREAMING_SNAKE_CASE :str = ["""past_key_values"""] def __init__( self , _a=65_024 , _a=4_544 , _a=32 , _a=71 , _a=1E-5 , _a=0.02 , _a=True , _a=0.0 , _a=0.0 , _a=None , _a=False , _a=False , _a=True , _a=True , _a=False , _a=11 , _a=11 , **_a , ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = vocab_size # Backward compatibility with n_embed kwarg SCREAMING_SNAKE_CASE__ : Optional[int] = kwargs.pop("""n_embed""" , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE__ : int = hidden_size if n_embed is None else n_embed SCREAMING_SNAKE_CASE__ : Optional[Any] = num_hidden_layers SCREAMING_SNAKE_CASE__ : int = num_attention_heads SCREAMING_SNAKE_CASE__ : int = layer_norm_epsilon SCREAMING_SNAKE_CASE__ : int = initializer_range SCREAMING_SNAKE_CASE__ : List[str] = use_cache SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout SCREAMING_SNAKE_CASE__ : Tuple = attention_dropout SCREAMING_SNAKE_CASE__ : Optional[Any] = bos_token_id SCREAMING_SNAKE_CASE__ : Union[str, Any] = eos_token_id SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_attention_heads if num_kv_heads is None else num_kv_heads SCREAMING_SNAKE_CASE__ : Any = alibi SCREAMING_SNAKE_CASE__ : str = new_decoder_architecture SCREAMING_SNAKE_CASE__ : Tuple = multi_query # Ignored when new_decoder_architecture is True SCREAMING_SNAKE_CASE__ : str = parallel_attn SCREAMING_SNAKE_CASE__ : str = bias super().__init__(bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @property def _a ( self ) -> Dict: """simple 
docstring""" return self.hidden_size // self.num_attention_heads @property def _a ( self ) -> Dict: """simple docstring""" return not self.alibi
132
"""Ideal gas law (PV = nRT) helpers."""

UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Return the pressure P = nRT / V of an ideal gas.

    Fix: the mangled original gave both functions the same name and repeated
    the parameter name three times (a SyntaxError), and the gas constant was
    bound to a name the bodies never referenced. Names restored.

    Raises ValueError if any input is negative.
    """
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("""Invalid inputs. Enter positive value.""")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Return the volume V = nRT / P of an ideal gas.

    Raises ValueError if any input is negative.
    """
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("""Invalid inputs. Enter positive value.""")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
321
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _UpperCamelCase: Union[str, Any] = logging.get_logger(__name__) _UpperCamelCase: int = { '''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''', } class a__ ( _UpperCAmelCase, _UpperCAmelCase ): _lowerCamelCase = 'bit' _lowerCamelCase = ['preactivation', 'bottleneck'] _lowerCamelCase = ['SAME', 'VALID'] def __init__( self : Optional[int], lowerCAmelCase : Optional[int]=3, lowerCAmelCase : List[Any]=64, lowerCAmelCase : Tuple=[256, 512, 1024, 2048], lowerCAmelCase : str=[3, 4, 6, 3], lowerCAmelCase : Optional[int]="preactivation", lowerCAmelCase : Optional[Any]="relu", lowerCAmelCase : List[str]=None, lowerCAmelCase : str=32, lowerCAmelCase : Dict=0.0, lowerCAmelCase : Tuple=False, lowerCAmelCase : Any=32, lowerCAmelCase : Dict=1, lowerCAmelCase : Any=None, lowerCAmelCase : Union[str, Any]=None, **lowerCAmelCase : Union[str, Any], ) -> int: super().__init__(**SCREAMING_SNAKE_CASE_ ) if layer_type not in self.layer_types: raise ValueError(f'''layer_type={layer_type} is not one of {','.join(self.layer_types )}''' ) if global_padding is not None: if global_padding.upper() in self.supported_padding: lowercase : str = global_padding.upper() else: raise ValueError(f'''Padding strategy {global_padding} not supported''' ) lowercase : Optional[int] = num_channels lowercase : str = embedding_size lowercase : List[Any] = hidden_sizes lowercase : Any = depths lowercase : Optional[int] = layer_type lowercase : List[str] = hidden_act lowercase : Any = global_padding lowercase : Dict = num_groups lowercase : Optional[int] = drop_path_rate lowercase : Dict = embedding_dynamic_padding lowercase : str = output_stride lowercase : str = width_factor lowercase : int = ["""stem"""] + [f'''stage{idx}''' for idx in range(1, len(SCREAMING_SNAKE_CASE_ ) + 1 
)] lowercase : List[str] = get_aligned_output_features_output_indices( out_features=SCREAMING_SNAKE_CASE_, out_indices=SCREAMING_SNAKE_CASE_, stage_names=self.stage_names )
355
"""simple docstring""" import os import unittest from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer from ...test_tokenization_common import TokenizerTesterMixin class a__ ( SCREAMING_SNAKE_CASE__, unittest.TestCase ): _lowerCamelCase = PhobertTokenizer _lowerCamelCase = False def lowercase ( self : Optional[Any] ) -> Optional[Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowercase : Optional[Any] = ['T@@', 'i', 'I', 'R@@', 'r', 'e@@'] lowercase : Any = dict(zip(lowerCAmelCase, range(len(lowerCAmelCase ) ) ) ) lowercase : int = ['#version: 0.2', 'l à</w>'] lowercase : Tuple = {'unk_token': '<unk>'} lowercase : int = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] ) lowercase : Union[str, Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file, 'w', encoding='utf-8' ) as fp: for token in vocab_tokens: fp.write(f'''{token} {vocab_tokens[token]}\n''' ) with open(self.merges_file, 'w', encoding='utf-8' ) as fp: fp.write('\n'.join(lowerCAmelCase ) ) def lowercase ( self : List[str], **lowerCAmelCase : Optional[Any] ) -> Tuple: kwargs.update(self.special_tokens_map ) return PhobertTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase ) def lowercase ( self : Union[str, Any], lowerCAmelCase : Dict ) -> Optional[int]: lowercase : List[Any] = 'Tôi là VinAI Research' lowercase : Any = 'T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>' return input_text, output_text def lowercase ( self : int ) -> Tuple: lowercase : List[Any] = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map ) lowercase : List[str] = 'Tôi là VinAI Research' lowercase : Dict = 'T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'.split() lowercase : int = tokenizer.tokenize(lowerCAmelCase ) print(lowerCAmelCase ) self.assertListEqual(lowerCAmelCase, lowerCAmelCase ) lowercase : 
str = tokens + [tokenizer.unk_token] lowercase : Tuple = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ), lowerCAmelCase )
53
0
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch

import math
from typing import Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin


class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """Variance-preserving (VP) SDE scheduler for score-based generative models.

    Args (stored on ``self.config`` by ``register_to_config``):
        num_train_timesteps: Number of diffusion steps used at training time.
        beta_min / beta_max: Endpoints of the linear noise schedule.
        sampling_eps: Smallest timestep value used at sampling time.
    """

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device=None):
        """Create the continuous timestep schedule, from 1 down to ``sampling_eps``."""
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        """Predict the sample at the previous timestep by reversing the SDE.

        Args:
            score: Model output (score) for the current sample.
            x: Current noisy sample.
            t: Current (continuous) timestep tensor.
            generator: Optional RNG for the added noise.

        Returns:
            ``(x, x_mean)`` — the noised previous sample and its mean.

        Raises:
            ValueError: If ``set_timesteps`` was not called first.
        """
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        # Broadcast std over the sample's trailing dimensions.
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
101
import os
import zipfile

import requests

from get_ci_error_statistics import download_artifact, get_artifacts_links


def get_daily_ci_runs(token, num_runs=7):
    """Return the workflow runs of the scheduled (daily) CI.

    Only runs triggered by the `schedule` event on the `main` branch are
    selected, most recent first, at most ``num_runs`` of them.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Return the id of the most recent *completed* daily-CI workflow run, or None."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the named artifacts of the last completed daily-CI run into output_dir."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # NOTE: `worflow_run_id` (sic) is the keyword expected by the helper.
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download and unpack the report artifacts of the last completed daily-CI run.

    Returns:
        dict mapping artifact name -> {member filename -> decoded text content}.
        Artifacts whose zip was not downloaded are omitted.
    """
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
334
0
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test


sys.path.append(str(Path(__file__).parent.parent / "utils"))

from test_module.custom_image_processing import CustomImageProcessor  # noqa E402


SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")


class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        """Loading from cache must still work while the Hub is unreachable."""
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )

        self.assertIsNotNone(config)


@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        """Best-effort cleanup of repos the tests may have created."""
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
169
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError

import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    PROCESSOR_MAPPING,
    TOKENIZER_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    AutoProcessor,
    AutoTokenizer,
    BertTokenizer,
    ProcessorMixin,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402
from test_module.custom_processing import CustomProcessor  # noqa E402
from test_module.custom_tokenization import CustomTokenizer  # noqa E402


SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures")


class AutoFeatureExtractorTest(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    def setUp(self):
        # Don't wait for user confirmation when loading remote code in tests.
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_processor_from_model_shortcut(self):
        processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_repo(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            processor.save_pretrained(tmpdirname)

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_extractor_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(SAMPLE_PROCESSOR_CONFIG, os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME))
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in tokenizer
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "r") as f:
                config_dict = json.load(f)
                config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_feat_extr_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in feature extractor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "r") as f:
                config_dict = json.load(f)
                config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_tokenizer_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config(processor_class="Wav2Vec2Processor")
            model_config.save_pretrained(tmpdirname)
            # copy relevant files
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))
            # create emtpy sample processor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write("{}")

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_from_pretrained_dynamic_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )

        processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor", trust_remote_code=True)
        self.assertTrue(processor.special_attribute_present)
        self.assertEqual(processor.__class__.__name__, "NewProcessor")

        feature_extractor = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present)
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        tokenizer = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            new_processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True, use_fast=False
            )
            new_tokenizer = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present)
            self.assertEqual(new_tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")

    def test_new_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            AutoProcessor.register(CustomConfig, CustomProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoProcessor.register(Wav2Vec2Config, Wav2Vec2Processor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                vocab_file = os.path.join(tmp_dir, "vocab.txt")
                with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                    vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
                tokenizer = CustomTokenizer(vocab_file)

            processor = CustomProcessor(feature_extractor, tokenizer)

            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(tmp_dir)
                new_processor = AutoProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_processor, CustomProcessor)

        finally:
            # Always unregister the custom classes so other tests see a clean registry.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_processor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            special_attribute_present = False

        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewProcessor(ProcessorMixin):
            feature_extractor_class = "AutoFeatureExtractor"
            tokenizer_class = "AutoTokenizer"
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoProcessor.register(CustomConfig, NewProcessor)
            # If remote code is not set, the default is to use local classes.
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local ones.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertTrue(processor.special_attribute_present)
            self.assertTrue(processor.feature_extractor.special_attribute_present)
            self.assertTrue(processor.tokenizer.special_attribute_present)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_auto_processor_creates_tokenizer(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(processor.__class__.__name__, "BertTokenizerFast")

    def test_auto_processor_creates_image_processor(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext")
        self.assertEqual(processor.__class__.__name__, "ConvNextImageProcessor")


@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        """Best-effort cleanup of repos the tests may have created."""
        try:
            delete_repo(token=cls._token, repo_id="test-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor"), push_to_hub=True, use_auth_token=self._token
            )

            new_processor = Wav2Vec2Processor.from_pretrained(f"{USER}/test-processor")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_in_organization(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor-org"),
                push_to_hub=True,
                use_auth_token=self._token,
                organization="valid_org",
            )

            new_processor = Wav2Vec2Processor.from_pretrained("valid_org/test-processor-org")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_dynamic_processor(self):
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()

        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        processor = CustomProcessor(feature_extractor, tokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f"{USER}/test-dynamic-processor", token=self._token)
            repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-processor", token=self._token)
            processor.save_pretrained(tmp_dir)

            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map,
                {
                    "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(tmp_dir, "tokenizer_config.json")) as f:
                tokenizer_config = json.load(f)
            self.assertDictEqual(
                tokenizer_config["auto_map"],
                {
                    "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_feature_extraction.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_tokenization.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_processing.py")))

            repo.push_to_hub()

        new_processor = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor", trust_remote_code=True)
        # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
        self.assertEqual(new_processor.__class__.__name__, "CustomProcessor")
169
1
"""simple docstring""" import argparse import os import re import tensorflow as tf import torch from transformers import BertConfig, BertModel from transformers.utils import logging logging.set_verbosity_info() _UpperCAmelCase = logging.get_logger(__name__) def __magic_name__ ( lowercase , lowercase , lowercase ): SCREAMING_SNAKE_CASE_: Tuple =os.path.abspath(lowercase ) logger.info(f'''Converting TensorFlow checkpoint from {tf_path}''' ) # Load weights from TF model SCREAMING_SNAKE_CASE_: str =tf.train.list_variables(lowercase ) SCREAMING_SNAKE_CASE_: Optional[int] =[] SCREAMING_SNAKE_CASE_: Tuple =[] SCREAMING_SNAKE_CASE_: Tuple =[] for full_name, shape in init_vars: # logger.info(f"Loading TF weight {name} with shape {shape}") SCREAMING_SNAKE_CASE_: Union[str, Any] =full_name.split("""/""" ) if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]: logger.info(f'''Skipping non-model layer {full_name}''' ) continue if "optimizer" in full_name: logger.info(f'''Skipping optimization layer {full_name}''' ) continue if name[0] == "model": # ignore initial 'model' SCREAMING_SNAKE_CASE_: Any =name[1:] # figure out how many levels deep the name is SCREAMING_SNAKE_CASE_: Optional[Any] =0 for _name in name: if _name.startswith("""layer_with_weights""" ): depth += 1 else: break layer_depth.append(lowercase ) # read data SCREAMING_SNAKE_CASE_: Optional[int] =tf.train.load_variable(lowercase , lowercase ) names.append("""/""".join(lowercase ) ) arrays.append(lowercase ) logger.info(f'''Read a total of {len(lowercase ):,} layers''' ) # Sanity check if len(set(lowercase ) ) != 1: raise ValueError(f'''Found layer names with different depths (layer depth {list(set(lowercase ) )})''' ) SCREAMING_SNAKE_CASE_: int =list(set(lowercase ) )[0] if layer_depth != 1: raise ValueError( """The model contains more than just the embedding/encoder layers. 
This script does not handle MLM/NSP""" """ heads.""" ) # convert layers logger.info("""Converting weights...""" ) for full_name, array in zip(lowercase , lowercase ): SCREAMING_SNAKE_CASE_: List[str] =full_name.split("""/""" ) SCREAMING_SNAKE_CASE_: Any =model SCREAMING_SNAKE_CASE_: List[Any] =[] for i, m_name in enumerate(lowercase ): if m_name == ".ATTRIBUTES": # variable names end with .ATTRIBUTES/VARIABLE_VALUE break if m_name.startswith("""layer_with_weights""" ): SCREAMING_SNAKE_CASE_: List[str] =int(m_name.split("""-""" )[-1] ) if layer_num <= 2: # embedding layers # layer_num 0: word_embeddings # layer_num 1: position_embeddings # layer_num 2: token_type_embeddings continue elif layer_num == 3: # embedding LayerNorm trace.extend(["""embeddings""", """LayerNorm"""] ) SCREAMING_SNAKE_CASE_: Tuple =getattr(lowercase , """embeddings""" ) SCREAMING_SNAKE_CASE_: Optional[int] =getattr(lowercase , """LayerNorm""" ) elif layer_num > 3 and layer_num < config.num_hidden_layers + 4: # encoder layers trace.extend(["""encoder""", """layer""", str(layer_num - 4 )] ) SCREAMING_SNAKE_CASE_: str =getattr(lowercase , """encoder""" ) SCREAMING_SNAKE_CASE_: str =getattr(lowercase , """layer""" ) SCREAMING_SNAKE_CASE_: List[str] =pointer[layer_num - 4] elif layer_num == config.num_hidden_layers + 4: # pooler layer trace.extend(["""pooler""", """dense"""] ) SCREAMING_SNAKE_CASE_: Any =getattr(lowercase , """pooler""" ) SCREAMING_SNAKE_CASE_: List[str] =getattr(lowercase , """dense""" ) elif m_name == "embeddings": trace.append("""embeddings""" ) SCREAMING_SNAKE_CASE_: Any =getattr(lowercase , """embeddings""" ) if layer_num == 0: trace.append("""word_embeddings""" ) SCREAMING_SNAKE_CASE_: Optional[int] =getattr(lowercase , """word_embeddings""" ) elif layer_num == 1: trace.append("""position_embeddings""" ) SCREAMING_SNAKE_CASE_: Optional[Any] =getattr(lowercase , """position_embeddings""" ) elif layer_num == 2: trace.append("""token_type_embeddings""" ) SCREAMING_SNAKE_CASE_: 
Optional[Any] =getattr(lowercase , """token_type_embeddings""" ) else: raise ValueError(f'''Unknown embedding layer with name {full_name}''' ) trace.append("""weight""" ) SCREAMING_SNAKE_CASE_: List[Any] =getattr(lowercase , """weight""" ) elif m_name == "_attention_layer": # self-attention layer trace.extend(["""attention""", """self"""] ) SCREAMING_SNAKE_CASE_: Optional[Any] =getattr(lowercase , """attention""" ) SCREAMING_SNAKE_CASE_: Optional[int] =getattr(lowercase , """self""" ) elif m_name == "_attention_layer_norm": # output attention norm trace.extend(["""attention""", """output""", """LayerNorm"""] ) SCREAMING_SNAKE_CASE_: Dict =getattr(lowercase , """attention""" ) SCREAMING_SNAKE_CASE_: str =getattr(lowercase , """output""" ) SCREAMING_SNAKE_CASE_: Optional[int] =getattr(lowercase , """LayerNorm""" ) elif m_name == "_attention_output_dense": # output attention dense trace.extend(["""attention""", """output""", """dense"""] ) SCREAMING_SNAKE_CASE_: Any =getattr(lowercase , """attention""" ) SCREAMING_SNAKE_CASE_: Union[str, Any] =getattr(lowercase , """output""" ) SCREAMING_SNAKE_CASE_: Union[str, Any] =getattr(lowercase , """dense""" ) elif m_name == "_output_dense": # output dense trace.extend(["""output""", """dense"""] ) SCREAMING_SNAKE_CASE_: Tuple =getattr(lowercase , """output""" ) SCREAMING_SNAKE_CASE_: Union[str, Any] =getattr(lowercase , """dense""" ) elif m_name == "_output_layer_norm": # output dense trace.extend(["""output""", """LayerNorm"""] ) SCREAMING_SNAKE_CASE_: Union[str, Any] =getattr(lowercase , """output""" ) SCREAMING_SNAKE_CASE_: Any =getattr(lowercase , """LayerNorm""" ) elif m_name == "_key_dense": # attention key trace.append("""key""" ) SCREAMING_SNAKE_CASE_: str =getattr(lowercase , """key""" ) elif m_name == "_query_dense": # attention query trace.append("""query""" ) SCREAMING_SNAKE_CASE_: Optional[int] =getattr(lowercase , """query""" ) elif m_name == "_value_dense": # attention value trace.append("""value""" ) 
SCREAMING_SNAKE_CASE_: Optional[int] =getattr(lowercase , """value""" ) elif m_name == "_intermediate_dense": # attention intermediate dense trace.extend(["""intermediate""", """dense"""] ) SCREAMING_SNAKE_CASE_: Dict =getattr(lowercase , """intermediate""" ) SCREAMING_SNAKE_CASE_: Optional[int] =getattr(lowercase , """dense""" ) elif m_name == "_output_layer_norm": # output layer norm trace.append("""output""" ) SCREAMING_SNAKE_CASE_: int =getattr(lowercase , """output""" ) # weights & biases elif m_name in ["bias", "beta"]: trace.append("""bias""" ) SCREAMING_SNAKE_CASE_: List[str] =getattr(lowercase , """bias""" ) elif m_name in ["kernel", "gamma"]: trace.append("""weight""" ) SCREAMING_SNAKE_CASE_: Dict =getattr(lowercase , """weight""" ) else: logger.warning(f'''Ignored {m_name}''' ) # for certain layers reshape is necessary SCREAMING_SNAKE_CASE_: List[str] =""".""".join(lowercase ) if re.match(R"""(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)""" , lowercase ) or re.match( R"""(\S+)\.attention\.output\.dense\.weight""" , lowercase ): SCREAMING_SNAKE_CASE_: Dict =array.reshape(pointer.data.shape ) if "kernel" in full_name: SCREAMING_SNAKE_CASE_: Dict =array.transpose() if pointer.shape == array.shape: SCREAMING_SNAKE_CASE_: Optional[int] =torch.from_numpy(lowercase ) else: raise ValueError( f'''Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:''' f''' {array.shape}''' ) logger.info(f'''Successfully set variable {full_name} to PyTorch layer {trace}''' ) return model def __magic_name__ ( lowercase , lowercase , lowercase ): # Instantiate model logger.info(f'''Loading model based on config from {config_path}...''' ) SCREAMING_SNAKE_CASE_: Dict =BertConfig.from_json_file(lowercase ) SCREAMING_SNAKE_CASE_: str =BertModel(lowercase ) # Load weights from checkpoint logger.info(f'''Loading weights from checkpoint {tf_checkpoint_path}...''' ) load_tfa_weights_in_bert(lowercase , lowercase , lowercase ) # Save 
pytorch-model logger.info(f'''Saving PyTorch model to {pytorch_dump_path}...''' ) torch.save(model.state_dict() , lowercase ) if __name__ == "__main__": _UpperCAmelCase = argparse.ArgumentParser() parser.add_argument( """--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow 2.x checkpoint path.""" ) parser.add_argument( """--bert_config_file""", type=str, required=True, help="""The config json file corresponding to the BERT model. This specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", type=str, required=True, help="""Path to the output PyTorch model (must include filename).""", ) _UpperCAmelCase = parser.parse_args() convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
173
"""simple docstring""" # flake8: noqa # Lint as: python3 _UpperCAmelCase = [ """VerificationMode""", """Version""", """disable_progress_bar""", """enable_progress_bar""", """is_progress_bar_enabled""", """experimental""", ] from .info_utils import VerificationMode from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled from .version import Version from .experimental import experimental
173
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A_ : List[str] = logging.get_logger(__name__) A_ : List[str] = { 'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json', # See all LeViT models at https://huggingface.co/models?filter=levit } class _a (_lowerCamelCase ): '''simple docstring''' UpperCAmelCase__: Tuple = '''levit''' def __init__( self , A__=224 , A__=3 , A__=3 , A__=2 , A__=1 , A__=16 , A__=[128, 256, 384] , A__=[4, 8, 12] , A__=[4, 4, 4] , A__=[16, 16, 16] , A__=0 , A__=[2, 2, 2] , A__=[2, 2, 2] , A__=0.0_2 , **A__ , ): super().__init__(**A__ ) A__ : Any = image_size A__ : str = num_channels A__ : Union[str, Any] = kernel_size A__ : List[str] = stride A__ : List[Any] = padding A__ : Optional[Any] = hidden_sizes A__ : str = num_attention_heads A__ : Any = depths A__ : str = key_dim A__ : Any = drop_path_rate A__ : Optional[Any] = patch_size A__ : List[Any] = attention_ratio A__ : Any = mlp_ratio A__ : Union[str, Any] = initializer_range A__ : Optional[Any] = [ ["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class _a (_lowerCamelCase ): '''simple docstring''' UpperCAmelCase__: Optional[Any] = version.parse('''1.11''' ) @property def __A ( self ): return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def __A ( self ): return 1e-4
353
from queue import PriorityQueue from typing import Any import numpy as np def UpperCamelCase (lowercase_: dict , lowercase_: str , lowercase_: set , lowercase_: set , lowercase_: dict , lowercase_: dict , lowercase_: PriorityQueue , lowercase_: dict , lowercase_: float | int , ) -> float | int: for nxt, d in graph[v]: if nxt in visited_forward: continue A__ : Any = cst_fwd.get(lowercase_ , np.inf ) A__ : List[Any] = cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt) ) A__ : Tuple = new_cost_f A__ : Any = v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: A__ : Optional[int] = cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def UpperCamelCase (lowercase_: str , lowercase_: str , lowercase_: dict , lowercase_: dict ) -> int: A__ : Dict = -1 A__ : List[Any] = set() A__ : Union[str, Any] = set() A__ : Optional[Any] = {source: 0} A__ : int = {destination: 0} A__ : Optional[Any] = {source: None} A__ : Union[str, Any] = {destination: None} A__ : PriorityQueue[Any] = PriorityQueue() A__ : PriorityQueue[Any] = PriorityQueue() A__ : List[Any] = np.inf queue_forward.put((0, source) ) queue_backward.put((0, destination) ) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): A__ , A__ : Tuple = queue_forward.get() visited_forward.add(lowercase_ ) A__ , A__ : Optional[Any] = queue_backward.get() visited_backward.add(lowercase_ ) A__ : List[Any] = pass_and_relaxation( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) A__ : List[Any] = pass_and_relaxation( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if shortest_distance != np.inf: A__ : int = shortest_distance return shortest_path_distance A_ : List[Any] = { 'B': [['C', 1]], 'C': [['D', 1]], 'D': [['F', 1]], 'E': [['B', 1], 
['G', 2]], 'F': [], 'G': [['F', 1]], } A_ : Optional[int] = { 'B': [['E', 1]], 'C': [['B', 1]], 'D': [['C', 1]], 'F': [['D', 1], ['G', 1]], 'E': [[None, np.inf]], 'G': [['E', 2]], } if __name__ == "__main__": import doctest doctest.testmod()
141
0
'''Single-variable polynomial arithmetic (add, multiply, evaluate, calculus).'''
from __future__ import annotations

from collections.abc import MutableSequence


class lowercase_:
    """A polynomial of fixed degree; ``coefficients[i]`` multiplies ``x**i``.

    FIX(review): the original was broken by identifier mangling — ``__init__``
    had two parameters both named ``lowercase__`` (SyntaxError), the evaluate /
    derivative / integral methods all shared the name ``SCREAMING_SNAKE_CASE``
    (so only the last survived), and ``__str__`` referenced the undefined name
    ``lowercase__`` where the exponent ``i`` belongs.
    """

    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """Store the degree and a defensive copy of the coefficients.

        Raises:
            ValueError: if ``len(coefficients) != degree + 1``.
        """
        if len(coefficients) != degree + 1:
            raise ValueError(
                '''The number of coefficients should be equal to the degree + 1.'''
            )
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a: lowercase_) -> lowercase_:
        """Return the sum; result degree is the larger of the two degrees."""
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return lowercase_(self.degree, coefficients)
        coefficients = polynomial_a.coefficients[:]
        for i in range(self.degree + 1):
            coefficients[i] += self.coefficients[i]
        return lowercase_(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a: lowercase_) -> lowercase_:
        """Subtraction implemented as addition of the negation."""
        return self + polynomial_a * lowercase_(0, [-1])

    def __neg__(self) -> lowercase_:
        """Return the polynomial with every coefficient negated."""
        return lowercase_(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a: lowercase_) -> lowercase_:
        """Schoolbook O(n*m) polynomial multiplication."""
        coefficients: list[float] = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return lowercase_(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        """Evaluate the polynomial at ``substitution``."""
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        """Human-readable form, highest power first, zero terms skipped."""
        polynomial = ''
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                # No leading " + " before the very first printed term.
                if polynomial:
                    polynomial += ' + '
            else:
                polynomial += ' - '
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + 'x'
            else:
                polynomial += str(abs(self.coefficients[i])) + 'x^' + str(i)
        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> lowercase_:
        """Return the first derivative (degree drops by one)."""
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return lowercase_(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> lowercase_:
        """Return the antiderivative with integration constant ``constant``."""
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return lowercase_(self.degree + 1, coefficients)

    def __eq__(self, polynomial_a: object) -> bool:
        """Equal iff degrees and all coefficients match exactly."""
        if not isinstance(polynomial_a, lowercase_):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_a: object) -> bool:
        return not self.__eq__(polynomial_a)
104
'''Net present value (NPV) of a cash-flow series.'''


def lowerCAmelCase_(discount_rate: float, cash_flows: list[float]) -> float:
    """Return the net present value of ``cash_flows`` at ``discount_rate``.

    FIX(review): the original signature declared both parameters as
    ``_lowerCamelCase`` (duplicate argument names — a SyntaxError) while the
    body referenced ``discount_rate``/``cash_flows``; the intended names are
    restored.

    Cash flow ``i`` is discounted by ``(1 + discount_rate) ** i`` (flow 0 is
    undiscounted).  The result is rounded to 2 decimal places.

    Raises:
        ValueError: if ``discount_rate`` is negative or ``cash_flows`` is empty.
    """
    if discount_rate < 0:
        raise ValueError("""Discount rate cannot be negative""")
    if not cash_flows:
        raise ValueError("""Cash flows list cannot be empty""")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
112
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { '''vinvino02/glpn-kitti''': '''https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json''', # See all GLPN models at https://huggingface.co/models?filter=glpn } class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "glpn" def __init__( self, lowerCAmelCase__=3, lowerCAmelCase__=4, lowerCAmelCase__=[2, 2, 2, 2], lowerCAmelCase__=[8, 4, 2, 1], lowerCAmelCase__=[32, 64, 160, 256], lowerCAmelCase__=[7, 3, 3, 3], lowerCAmelCase__=[4, 2, 2, 2], lowerCAmelCase__=[1, 2, 5, 8], lowerCAmelCase__=[4, 4, 4, 4], lowerCAmelCase__="gelu", lowerCAmelCase__=0.0, lowerCAmelCase__=0.0, lowerCAmelCase__=0.02, lowerCAmelCase__=0.1, lowerCAmelCase__=1e-6, lowerCAmelCase__=64, lowerCAmelCase__=10, lowerCAmelCase__=-1, **lowerCAmelCase__, ) -> str: super().__init__(**lowerCAmelCase__) snake_case_ = num_channels snake_case_ = num_encoder_blocks snake_case_ = depths snake_case_ = sr_ratios snake_case_ = hidden_sizes snake_case_ = patch_sizes snake_case_ = strides snake_case_ = mlp_ratios snake_case_ = num_attention_heads snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = initializer_range snake_case_ = drop_path_rate snake_case_ = layer_norm_eps snake_case_ = decoder_hidden_size snake_case_ = max_depth snake_case_ = head_in_index
312
"""simple docstring""" from __future__ import annotations def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> list[str]: if partitions <= 0: raise ValueError('partitions must be a positive number!' ) if partitions > number_of_bytes: raise ValueError('partitions can not > number_of_bytes!' ) snake_case_ = number_of_bytes // partitions snake_case_ = [] for i in range(UpperCAmelCase ): snake_case_ = i * bytes_per_partition + 1 snake_case_ = ( number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition ) allocation_list.append(f'{start_bytes}-{end_bytes}' ) return allocation_list if __name__ == "__main__": import doctest doctest.testmod()
312
1
import math


# FIX(review): in the original both functions were named ``lowerCAmelCase_``
# (the second definition shadowed the first), while the bodies referenced the
# pre-mangling names: the sieve body used ``n``/``lowercase__`` instead of its
# parameter, and the driver called ``prime_sieve(...)``; the ``__main__`` guard
# called ``solution()``.  The intended names are restored throughout.
def prime_sieve(n: int) -> list[int]:
    """Return all primes strictly below ``n`` via an odd-only Eratosthenes sieve.

    Requires ``n >= 3`` (the sieve seeds indices 0..2 unconditionally).
    """
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        # Cross out odd multiples of i; even indices are never consulted below.
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """Project Euler 234: sum of semidivisible numbers not exceeding ``limit``.

    A number n with lps = largest prime <= sqrt(n) and ups = smallest prime
    >= sqrt(n) is semidivisible when exactly one of lps, ups divides n.  For
    each consecutive prime pair (p, q) the candidates lie in (p**2, q**2); the
    loop adds multiples of p, adds multiples of q, then subtracts multiples of
    p*q twice (they were counted by both passes but qualify for neither).
    """
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound so the downward pass starts within the limit.
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current), counting downwards.
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps.
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps passes.
            matches_sum -= current * 2
            current += last_prime * next_prime

        # Setup for next consecutive prime pair.
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
201
"""simple docstring""" import functools from typing import Any def _snake_case ( lowercase__ , lowercase__ ): # Validation if not isinstance(lowercase__ , lowercase__ ) or len(lowercase__ ) == 0: raise ValueError('the string should be not empty string' ) if not isinstance(lowercase__ , lowercase__ ) or not all( isinstance(lowercase__ , lowercase__ ) and len(lowercase__ ) > 0 for item in words ): raise ValueError('the words should be a list of non-empty strings' ) # Build trie _lowerCamelCase : dict[str, Any] = {} _lowerCamelCase : List[Any] = 'WORD_KEEPER' for word in words: _lowerCamelCase : Dict = trie for c in word: if c not in trie_node: _lowerCamelCase : Any = {} _lowerCamelCase : str = trie_node[c] _lowerCamelCase : Optional[Any] = True _lowerCamelCase : Dict = len(lowercase__ ) # Dynamic programming method @functools.cache def is_breakable(lowercase__ ) -> bool: if index == len_string: return True _lowerCamelCase : List[Any] = trie for i in range(lowercase__ , lowercase__ ): _lowerCamelCase : Any = trie_node.get(string[i] , lowercase__ ) if trie_node is None: return False if trie_node.get(lowercase__ , lowercase__ ) and is_breakable(i + 1 ): return True return False return is_breakable(0 ) if __name__ == "__main__": import doctest doctest.testmod()
96
0
import multiprocessing
import os
from typing import BinaryIO, Optional, Union

import fsspec

from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


# NOTE(review): this whole file is a machine-mangled copy of datasets'
# io/json.py.  All parameters are named ``__SCREAMING_SNAKE_CASE`` (duplicate
# argument names), all local bindings collapsed onto ``__a``, and bodies still
# reference the pre-mangling names (``field``, ``orient``, ``written`` ...) —
# the code is documented as-is but is not runnable in this form.
class _A ( __UpperCAmelCase ):
    """Reader that turns JSON/JSON-Lines files into a ``Dataset`` via the Json builder."""

    def __init__( self : Dict , __SCREAMING_SNAKE_CASE : NestedDataStructureLike[PathLike] , __SCREAMING_SNAKE_CASE : Optional[NamedSplit] = None , __SCREAMING_SNAKE_CASE : Optional[Features] = None , __SCREAMING_SNAKE_CASE : str = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[str] = None , __SCREAMING_SNAKE_CASE : Optional[int] = None , **__SCREAMING_SNAKE_CASE : Any , ):
        '''Forward reader options to the base class and build the Json builder.'''
        super().__init__(
            __SCREAMING_SNAKE_CASE , split=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE , streaming=__SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
        # Originally: self.field; normalize a single path to {split: path}.
        __a = field
        __a = path_or_paths if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) else {self.split: path_or_paths}
        __a = Json(
            cache_dir=__SCREAMING_SNAKE_CASE , data_files=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , field=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )

    def _lowerCamelCase ( self : List[Any]):
        '''Materialize the dataset: streaming iterable or download-and-prepare.'''
        if self.streaming:
            __a = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            # Originally four distinct, defaulted-to-None builder options.
            __a = None
            __a = None
            __a = None
            __a = None
            self.builder.download_and_prepare(
                download_config=__SCREAMING_SNAKE_CASE , download_mode=__SCREAMING_SNAKE_CASE , verification_mode=__SCREAMING_SNAKE_CASE , base_path=__SCREAMING_SNAKE_CASE , num_proc=self.num_proc , )
            __a = self.builder.as_dataset(
                split=self.split , verification_mode=__SCREAMING_SNAKE_CASE , in_memory=self.keep_in_memory)
        return dataset


class _A :
    """Writer that serializes a ``Dataset`` to JSON (pandas ``to_json`` per batch)."""

    def __init__( self : Any , __SCREAMING_SNAKE_CASE : Dataset , __SCREAMING_SNAKE_CASE : Union[PathLike, BinaryIO] , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Optional[int] = None , **__SCREAMING_SNAKE_CASE : List[str] , ):
        '''Validate options and stash dataset, destination, batch size and kwargs.'''
        if num_proc is not None and num_proc <= 0:
            raise ValueError(F'num_proc {num_proc} must be an integer > 0.')
        __a = dataset
        __a = path_or_buf
        __a = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        __a = num_proc
        # Output bytes are encoded with this charset (originally self.encoding).
        __a = '''utf-8'''
        __a = to_json_kwargs

    def _lowerCamelCase ( self : int):
        '''Top-level write(): resolve to_json kwargs, open the sink, delegate to _write.'''
        __a = self.to_json_kwargs.pop('''path_or_buf''' , __SCREAMING_SNAKE_CASE)
        __a = self.to_json_kwargs.pop('''orient''' , '''records''')
        # JSON-Lines only makes sense for the "records" orientation.
        __a = self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False)
        __a = self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True)
        __a = self.to_json_kwargs.pop('''compression''' , __SCREAMING_SNAKE_CASE)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(F'`datasets` currently does not support {compression} compression')

        if isinstance(self.path_or_buf , (str, bytes, os.PathLike)):
            # Path-like destination: let fsspec handle (optional) compression.
            with fsspec.open(self.path_or_buf , '''wb''' , compression=__SCREAMING_SNAKE_CASE) as buffer:
                __a = self._write(file_obj=__SCREAMING_SNAKE_CASE , orient=__SCREAMING_SNAKE_CASE , lines=__SCREAMING_SNAKE_CASE , index=__SCREAMING_SNAKE_CASE , **self.to_json_kwargs)
        else:
            # Already-open buffer: compression cannot be applied here.
            if compression:
                raise NotImplementedError(
                    F'The compression parameter is not supported when writing to a buffer, but compression={compression}'
                    ''' was passed. Please provide a local path instead.''')
            __a = self._write(
                file_obj=self.path_or_buf , orient=__SCREAMING_SNAKE_CASE , lines=__SCREAMING_SNAKE_CASE , index=__SCREAMING_SNAKE_CASE , **self.to_json_kwargs)
        return written

    def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : str):
        '''Serialize one slice of rows to newline-terminated JSON bytes.'''
        # Originally: offset, orient, lines, index, to_json_kwargs = args
        __a , __a , __a , __a , __a = args
        __a = query_table(
            table=self.dataset.data , key=slice(__SCREAMING_SNAKE_CASE , offset + self.batch_size) , indices=self.dataset._indices , )
        __a = batch.to_pandas().to_json(
            path_or_buf=__SCREAMING_SNAKE_CASE , orient=__SCREAMING_SNAKE_CASE , lines=__SCREAMING_SNAKE_CASE , index=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
        if not json_str.endswith('''\n'''):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : BinaryIO , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : Optional[Any] , ):
        '''Write every batch to file_obj, serially or via a multiprocessing pool; return byte count.'''
        __a = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset) , self.batch_size) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
                __a = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(__SCREAMING_SNAKE_CASE)
        else:
            # Originally: num_rows, batch_size = len(self.dataset), self.batch_size
            __a , __a = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
                    written += file_obj.write(__SCREAMING_SNAKE_CASE)

        return written
131
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __snake_case :Tuple = { '''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case :List[Any] = ['''BloomTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case :Any = [ '''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BloomForCausalLM''', '''BloomModel''', '''BloomPreTrainedModel''', '''BloomForSequenceClassification''', '''BloomForTokenClassification''', '''BloomForQuestionAnswering''', ] if TYPE_CHECKING: from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bloom_fast import BloomTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bloom import ( BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST, BloomForCausalLM, BloomForQuestionAnswering, BloomForSequenceClassification, BloomForTokenClassification, BloomModel, BloomPreTrainedModel, ) else: import sys __snake_case :Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
131
1
"""simple docstring""" from ..utils import DummyObject, requires_backends class a ( metaclass=UpperCAmelCase__ ): UpperCamelCase : Optional[int] = ['torch', 'torchsde'] def __init__( self : Union[str, Any] , *lowerCAmelCase : Any , **lowerCAmelCase : Union[str, Any] ) -> Dict: '''simple docstring''' requires_backends(self , ["""torch""", """torchsde"""] ) @classmethod def lowerCamelCase__ ( cls : Union[str, Any] , *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : Tuple ) -> List[Any]: '''simple docstring''' requires_backends(cls , ["""torch""", """torchsde"""] ) @classmethod def lowerCamelCase__ ( cls : Union[str, Any] , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Any ) -> Tuple: '''simple docstring''' requires_backends(cls , ["""torch""", """torchsde"""] )
173
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _UpperCAmelCase = { """configuration_vivit""": ["""VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VivitConfig"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase = ["""VivitImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase = [ """VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """VivitModel""", """VivitPreTrainedModel""", """VivitForVideoClassification""", ] if TYPE_CHECKING: from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_vivit import VivitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vivit import ( VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST, VivitForVideoClassification, VivitModel, VivitPreTrainedModel, ) else: import sys _UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, 
module_spec=__spec__)
173
1
"""simple docstring""" import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class UpperCamelCase__( __A ): def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = False ,**__UpperCAmelCase ,) -> Dict: super().__init__(features=__UpperCAmelCase ,cache_dir=__UpperCAmelCase ,keep_in_memory=__UpperCAmelCase ,**__UpperCAmelCase ) A__ = Sql( cache_dir=__UpperCAmelCase ,features=__UpperCAmelCase ,sql=__UpperCAmelCase ,con=__UpperCAmelCase ,**__UpperCAmelCase ,) def snake_case__ ( self ) -> Union[str, Any]: A__ = None A__ = None A__ = None A__ = None self.builder.download_and_prepare( download_config=__UpperCAmelCase ,download_mode=__UpperCAmelCase ,verification_mode=__UpperCAmelCase ,base_path=__UpperCAmelCase ,) # Build dataset for splits A__ = self.builder.as_dataset( split='train' ,verification_mode=__UpperCAmelCase ,in_memory=self.keep_in_memory ) return dataset class UpperCamelCase__: def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> Optional[Any]: if num_proc is not None and num_proc <= 0: raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' ) A__ = dataset A__ = name A__ = con A__ = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE A__ = num_proc A__ = to_sql_kwargs def snake_case__ ( self ) -> int: A__ = self.to_sql_kwargs.pop('sql' ,__UpperCAmelCase ) A__ = self.to_sql_kwargs.pop('con' ,__UpperCAmelCase ) A__ = self.to_sql_kwargs.pop('index' ,__UpperCAmelCase ) A__ = self._write(index=__UpperCAmelCase ,**self.to_sql_kwargs ) return written def snake_case__ ( self ,__UpperCAmelCase ) -> Any: A__ , A__ , A__ = args 
A__ = {**to_sql_kwargs, 'if_exists': 'append'} if offset > 0 else to_sql_kwargs A__ = query_table( table=self.dataset.data ,key=slice(__UpperCAmelCase ,offset + self.batch_size ) ,indices=self.dataset._indices ,) A__ = batch.to_pandas() A__ = df.to_sql(self.name ,self.con ,index=__UpperCAmelCase ,**__UpperCAmelCase ) return num_rows or len(__UpperCAmelCase ) def snake_case__ ( self ,__UpperCAmelCase ,**__UpperCAmelCase ) -> int: A__ = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 ,len(self.dataset ) ,self.batch_size ) ,unit='ba' ,disable=not logging.is_progress_bar_enabled() ,desc='Creating SQL from Arrow format' ,): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: A__ , A__ = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql ,[(offset, index, to_sql_kwargs) for offset in range(0 ,__UpperCAmelCase ,__UpperCAmelCase )] ,) ,total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size ,unit='ba' ,disable=not logging.is_progress_bar_enabled() ,desc='Creating SQL from Arrow format' ,): written += num_rows return written
154
"""simple docstring""" import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class UpperCamelCase__( __A , __A , __A , unittest.TestCase ): lowerCAmelCase__ : str = StableUnCLIPPipeline lowerCAmelCase__ : Union[str, Any] = TEXT_TO_IMAGE_PARAMS lowerCAmelCase__ : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS lowerCAmelCase__ : int = TEXT_TO_IMAGE_IMAGE_PARAMS lowerCAmelCase__ : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false lowerCAmelCase__ : Optional[Any] = False def snake_case__ ( self ) -> List[Any]: A__ = 32 A__ = embedder_hidden_size # prior components torch.manual_seed(0 ) A__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) torch.manual_seed(0 ) A__ = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=__UpperCAmelCase ,projection_dim=__UpperCAmelCase ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,) ) torch.manual_seed(0 ) A__ = PriorTransformer( num_attention_heads=2 ,attention_head_dim=12 ,embedding_dim=__UpperCAmelCase ,num_layers=1 ,) torch.manual_seed(0 ) A__ = DDPMScheduler( variance_type='fixed_small_log' ,prediction_type='sample' 
,num_train_timesteps=10_00 ,clip_sample=__UpperCAmelCase ,clip_sample_range=5.0 ,beta_schedule='squaredcos_cap_v2' ,) # regular denoising components torch.manual_seed(0 ) A__ = StableUnCLIPImageNormalizer(embedding_dim=__UpperCAmelCase ) A__ = DDPMScheduler(beta_schedule='squaredcos_cap_v2' ) torch.manual_seed(0 ) A__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) torch.manual_seed(0 ) A__ = CLIPTextModel( CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=__UpperCAmelCase ,projection_dim=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,) ) torch.manual_seed(0 ) A__ = UNetaDConditionModel( sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') ,up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') ,block_out_channels=(32, 64) ,attention_head_dim=(2, 4) ,class_embed_type='projection' ,projection_class_embeddings_input_dim=embedder_projection_dim * 2 ,cross_attention_dim=__UpperCAmelCase ,layers_per_block=1 ,upcast_attention=__UpperCAmelCase ,use_linear_projection=__UpperCAmelCase ,) torch.manual_seed(0 ) A__ = DDIMScheduler( beta_schedule='scaled_linear' ,beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,prediction_type='v_prediction' ,set_alpha_to_one=__UpperCAmelCase ,steps_offset=1 ,) torch.manual_seed(0 ) A__ = AutoencoderKL() A__ = { # prior components 'prior_tokenizer': prior_tokenizer, 'prior_text_encoder': prior_text_encoder, 'prior': prior, 'prior_scheduler': prior_scheduler, # image noising components 'image_normalizer': image_normalizer, 'image_noising_scheduler': image_noising_scheduler, # regular denoising components 'tokenizer': tokenizer, 'text_encoder': text_encoder, 'unet': unet, 'scheduler': scheduler, 'vae': vae, } return components def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase=0 ) -> str: if str(__UpperCAmelCase ).startswith('mps' ): A__ = torch.manual_seed(__UpperCAmelCase ) else: A__ 
= torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) A__ = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'prior_num_inference_steps': 2, 'output_type': 'numpy', } return inputs def snake_case__ ( self ) -> List[Any]: A__ = torch_device == 'cpu' self._test_attention_slicing_forward_pass(test_max_difference=__UpperCAmelCase ) def snake_case__ ( self ) -> int: A__ = torch_device in ['cpu', 'mps'] self._test_inference_batch_single_identical(test_max_difference=__UpperCAmelCase ) @slow @require_torch_gpu class UpperCamelCase__( unittest.TestCase ): def snake_case__ ( self ) -> Dict: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case__ ( self ) -> List[Any]: A__ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy' ) A__ = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' ,torch_dtype=torch.floataa ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() A__ = torch.Generator(device='cpu' ).manual_seed(0 ) A__ = pipe('anime turle' ,generator=__UpperCAmelCase ,output_type='np' ) A__ = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(__UpperCAmelCase ,__UpperCAmelCase ) def snake_case__ ( self ) -> List[str]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() A__ = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' ,torch_dtype=torch.floataa ) A__ = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() A__ = pipe( 'anime turtle' 
,prior_num_inference_steps=2 ,num_inference_steps=2 ,output_type='np' ,) A__ = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
154
1
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_tensorflow_text_available():
    from transformers.models.bert import TFBertTokenizer


# Checkpoints whose Python and in-graph TF tokenizers are compared against each other.
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
# Tiny model used to build a saveable Keras model around the TF tokenizer.
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"

if is_tf_available():

    class ModelToSave(tf.keras.Model):
        """Minimal Keras model bundling an in-graph tokenizer with a tiny BERT,
        used to verify the tokenizer survives a save/load round trip."""

        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            # NOTE(review): the obfuscated original lost this argument; the tiny
            # checkpoint is the only config source that makes sense here — confirm.
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            # Tokenize inside the graph, then run the model on the token ids.
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]


@require_tf
@require_tensorflow_text
class _lowercase(unittest.TestCase):
    """Checks that TFBertTokenizer matches the reference Python BertTokenizer."""

    def setUp(self):
        super().setUp()
        # Each TF tokenizer is built twice (fast and slow backend), so the
        # Python tokenizer list is doubled to keep the zip() pairing aligned.
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [
            TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS
        ] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        # The TF tokenizer must produce the same shapes and values as the
        # reference Python tokenizer for single and paired inputs.
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    # Cast so dtype differences don't mask value equality.
                    self.assertTrue(
                        tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key])
                    )

    @slow
    def test_different_pairing_styles(self):
        # Passing pairs as tuples vs. as separate text/text_pair lists must agree.
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(
                    tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key])
                )

    @slow
    def test_graph_mode(self):
        # tf.function-compiled tokenization must match eager execution.
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_export_for_inference(self):
        # The tokenizer must survive being saved inside a Keras SavedModel.
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
                # We may see small differences because the loaded model is compiled,
                # so we need an epsilon for the test
                self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
41
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple

import torch
from audiocraft.models import MusicGen

from transformers import (
    AutoFeatureExtractor,
    AutoTokenizer,
    EncodecModel,
    MusicgenDecoderConfig,
    MusicgenForConditionalGeneration,
    MusicgenProcessor,
    TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Decoder keys that are legitimately absent after conversion (re-created at load time).
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]


def rename_keys(name):
    """Map one fairseq/audiocraft parameter name to its transformers equivalent."""
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name


def rename_state_dict(state_dict, hidden_size) -> Tuple[Dict, Dict]:
    """Rename all keys of *state_dict* in place and split it into the decoder
    state dict and the encoder-decoder projection state dict.

    The fused in_proj_weight of each attention layer is split into separate
    q/k/v projection weights of *hidden_size* rows each.
    """
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            # The projection is loaded separately into the composite model.
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict


def decoder_config_from_checkpoint(checkpoint) -> MusicgenDecoderConfig:
    """Return the decoder config matching a named MusicGen checkpoint size."""
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config


@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    """Convert an audiocraft MusicGen checkpoint to a transformers
    MusicgenForConditionalGeneration + MusicgenProcessor and optionally save /
    push them."""
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = TaEncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(
        text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder
    )

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint",
        default="small",
        type=str,
        help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
    )
    parser.add_argument(
        "--pytorch_dump_folder",
        required=True,
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    parser.add_argument(
        "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
    )

    args = parser.parse_args()
    # Fix: --device was parsed but never forwarded to the conversion.
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, device=args.device)
41
1
"""simple docstring""" import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() _snake_case : Any = logging.get_logger(__name__) _snake_case : int = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''label_embs_concat''': '''label_embeddings_concat''', '''mask_emb''': '''masked_spec_embed''', '''spk_proj''': '''speaker_proj''', } _snake_case : Tuple = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', '''label_embeddings_concat''', '''speaker_proj''', '''layer_norm_for_extract''', ] def a_ ( lowerCAmelCase_ : int, lowerCAmelCase_ : int, lowerCAmelCase_ : Dict, lowerCAmelCase_ : List[str], lowerCAmelCase_ : Optional[Any] ): for attribute in key.split('.' 
): __lowerCAmelCase = getattr(__lowerCAmelCase, __lowerCAmelCase ) if weight_type is not None: __lowerCAmelCase = getattr(__lowerCAmelCase, __lowerCAmelCase ).shape else: __lowerCAmelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __lowerCAmelCase = value elif weight_type == "weight_g": __lowerCAmelCase = value elif weight_type == "weight_v": __lowerCAmelCase = value elif weight_type == "bias": __lowerCAmelCase = value else: __lowerCAmelCase = value logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def a_ ( lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : int ): __lowerCAmelCase = [] __lowerCAmelCase = fairseq_model.state_dict() __lowerCAmelCase = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): __lowerCAmelCase = False if "conv_layers" in name: load_conv_layer( __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, hf_model.config.feat_extract_norm == 'group', ) __lowerCAmelCase = True else: for key, mapped_key in MAPPING.items(): __lowerCAmelCase = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key): # special case since naming is very similar continue __lowerCAmelCase = True if "*" in mapped_key: __lowerCAmelCase = name.split(__lowerCAmelCase )[0].split('.' 
)[-2] __lowerCAmelCase = mapped_key.replace('*', __lowerCAmelCase ) if "weight_g" in name: __lowerCAmelCase = 'weight_g' elif "weight_v" in name: __lowerCAmelCase = 'weight_v' elif "bias" in name: __lowerCAmelCase = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj __lowerCAmelCase = 'weight' else: __lowerCAmelCase = None set_recursively(__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase ) continue if not is_used: unused_weights.append(__lowerCAmelCase ) logger.warning(F"""Unused weights: {unused_weights}""" ) def a_ ( lowerCAmelCase_ : Optional[int], lowerCAmelCase_ : Optional[int], lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : Dict ): __lowerCAmelCase = full_name.split('conv_layers.' )[-1] __lowerCAmelCase = name.split('.' ) __lowerCAmelCase = int(items[0] ) __lowerCAmelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __lowerCAmelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __lowerCAmelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was 
found.""" ) __lowerCAmelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) __lowerCAmelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(__lowerCAmelCase ) @torch.no_grad() def a_ ( lowerCAmelCase_ : Tuple, lowerCAmelCase_ : List[Any], lowerCAmelCase_ : Optional[Any]=None, lowerCAmelCase_ : Optional[Any]=None, lowerCAmelCase_ : Optional[Any]=True ): if config_path is not None: __lowerCAmelCase = UniSpeechSatConfig.from_pretrained(__lowerCAmelCase ) else: __lowerCAmelCase = UniSpeechSatConfig() __lowerCAmelCase = '' if is_finetuned: __lowerCAmelCase = UniSpeechSatForCTC(__lowerCAmelCase ) else: __lowerCAmelCase = UniSpeechSatForPreTraining(__lowerCAmelCase ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) __lowerCAmelCase = model[0].eval() recursively_load_weights(__lowerCAmelCase, __lowerCAmelCase ) hf_wavavec.save_pretrained(__lowerCAmelCase ) if __name__ == "__main__": _snake_case : Any = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', 
help='Whether the model to convert is a fine-tuned model or not' ) _snake_case : Any = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
371
from math import factorial


class Dual:
    """A truncated dual/hyper-dual number: a real part plus a list of
    coefficients for the nilpotent components E1, E2, ... (E^(k+1) terms beyond
    the list are dropped). Used for forward-mode automatic differentiation."""

    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            # Seed `rank` dual components, each initialised to 1
            # (Dual(x, 1) represents x + E1).
            self.duals = [1] * rank
        else:
            # An explicit list of dual coefficients was supplied.
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        """Return a copy with trailing zero dual coefficients stripped."""
        cur = self.duals.copy()
        # Guard against an empty (or all-zero) coefficient list.
        while cur and cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            # Adding a scalar only shifts the real part.
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        # Pad the shorter coefficient list with zeros so the terms line up.
        if len(s_dual) > len(o_dual):
            o_dual.extend([0] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([0] * (len(o_dual) - len(s_dual)))
        new_duals = [s + o for s, o in zip(s_dual, o_dual)]
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            # Scalar multiplication scales every coefficient.
            return Dual(self.real * other, [dual * other for dual in self.duals])
        # Polynomial (Cauchy) product of the two coefficient lists; index k of
        # new_duals holds the coefficient of E(k+1).
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real / other, [dual / other for dual in self.duals])
        # Division by another dual number is not supported.
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real // other, [dual // other for dual in self.duals])
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        # Repeated multiplication keeps the truncated-series semantics exact.
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    """Return the *order*-th derivative of *func* at *position* using
    forward-mode automatic differentiation.

    `func` must be built from operations Dual supports (+, -, *, /, //, **).
    """
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    result = func(Dual(position, 1))
    if order == 0:
        return result.real
    if order > len(result.duals):
        # All derivatives beyond the highest tracked dual component are zero.
        return 0
    # The E^order coefficient is the Taylor coefficient f^(order)(x)/order!.
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
207
0
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    """Configuration class for a LeViT model.

    Stores the architecture hyper-parameters (patch embedding conv stack,
    per-stage hidden sizes/heads/depths, and the attention downsampling ops).
    """

    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        # NOTE(review): the list defaults are mutable and stored without copying,
        # so configs built from defaults share the same list objects — avoid
        # mutating them in place.
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        # Downsampling ("Subsample") attention blocks between the two stage
        # transitions: [op, key_dim, num_heads, attn_ratio, mlp_ratio, stride].
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    """ONNX export configuration for LeViT."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single image input with dynamic axes on every dimension.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
313
from __future__ import annotations _snake_case : Any = "Muhammad Umer Farooq" _snake_case : Optional[int] = "MIT" _snake_case : Union[str, Any] = "1.0.0" _snake_case : Optional[Any] = "Muhammad Umer Farooq" _snake_case : List[Any] = "contact@muhammadumerfarooq.me" _snake_case : Dict = "Alpha" import re from html.parser import HTMLParser from urllib import parse import requests class a (_lowerCAmelCase ): """simple docstring""" def __init__( self : Tuple , lowerCamelCase : str ) -> None: super().__init__() __snake_case : list[str] = [] __snake_case : Any = domain def __snake_case ( self : List[str] , lowerCamelCase : str , lowerCamelCase : list[tuple[str, str | None]] ) -> None: # Only parse the 'anchor' tag. if tag == "a": # Check the list of defined attributes. for name, value in attrs: # If href is defined, and not empty nor # print it. if name == "href" and value != "#" and value != "": # If not already in urls. if value not in self.urls: __snake_case : Any = parse.urljoin(self.domain , lowerCamelCase ) self.urls.append(lowerCamelCase ) def lowerCAmelCase_ ( __lowerCamelCase ): return ".".join(get_sub_domain_name(__lowerCamelCase ).split("." )[-2:] ) def lowerCAmelCase_ ( __lowerCamelCase ): return parse.urlparse(__lowerCamelCase ).netloc def lowerCAmelCase_ ( __lowerCamelCase = "https://github.com" ): __snake_case : Tuple = get_domain_name(__lowerCamelCase ) # Initialize the parser __snake_case : Dict = Parser(__lowerCamelCase ) try: # Open URL __snake_case : Any = requests.get(__lowerCamelCase ) # pass the raw HTML to the parser to get links parser.feed(r.text ) # Get links and loop through __snake_case : List[str] = set() for link in parser.urls: # open URL. # read = requests.get(link) try: __snake_case : List[str] = requests.get(__lowerCamelCase ) # Get the valid email. __snake_case : Any = re.findall("[a-zA-Z0-9]+@" + domain , read.text ) # If not in list then append it. 
for email in emails: valid_emails.add(__lowerCamelCase ) except ValueError: pass except ValueError: raise SystemExit(1 ) # Finally return a sorted list of email addresses with no duplicates. return sorted(__lowerCamelCase ) if __name__ == "__main__": _snake_case : Union[str, Any] = emails_from_url("https://github.com") print(f'''{len(emails)} emails found:''') print("\n".join(sorted(emails)))
123
0
"""Recall metric for the `datasets` library, backed by scikit-learn."""

from sklearn.metrics import recall_score

import datasets

# The mangled version assigned all three constants to the same name
# (`UpperCamelCase_`) while the decorator and `_info` referenced
# `_DESCRIPTION` / `_KWARGS_DESCRIPTION` / `_CITATION` — a NameError at
# import time. Restored to the names the code actually uses.
_DESCRIPTION = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"

_KWARGS_DESCRIPTION = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n    - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n    - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n    - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n    - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n    - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n    - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n    - `0`: If there is a zero division, the return value is `0`.\n    - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n    Example 1-A simple example with some errors\n        >>> recall_metric = datasets.load_metric('recall')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n        >>> print(results)\n        {'recall': 0.6666666666666666}\n\n    Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n        >>> recall_metric = datasets.load_metric('recall')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n        >>> print(results)\n        {'recall': 0.5}\n\n    Example 3-The same example as Example 1, but with `sample_weight` included.\n        >>> recall_metric = datasets.load_metric('recall')\n        >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n        >>> print(results)\n        {'recall': 0.55}\n\n    Example 4-A multiclass example, using different averages.\n        >>> recall_metric = datasets.load_metric('recall')\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n        >>> print(results)\n        {'recall': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n        >>> print(results)\n        {'recall': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n        >>> print(results)\n        {'recall': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {'recall': array([1., 0., 0.])}\n"

_CITATION = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Recall(datasets.Metric):
    """Recall metric; thin wrapper around ``sklearn.metrics.recall_score``."""

    # `datasets.Metric` dispatches to methods literally named `_info` and
    # `_compute`; the mangled version named both `__a`, so the metric could
    # never be loaded or computed.
    def _info(self) -> datasets.MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        """Compute recall; returns a dict with a float (or per-class array)."""
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
344
# NOTE(review): machine-mangled chunk (whitespace collapsed, identifiers
# rewritten; "aa" appears to stand in for the digits of atom14/atom37 and
# int64/float32 — TODO confirm against the original OpenFold
# `make_atom14_masks` / `make_atom14_masks_np`). Concrete defects visible in
# the code below, left in place because a confident reconstruction is not
# possible from this view alone:
#   1. Both functions are defined with the same name `A`; the second
#      definition shadows the first at module level.
#   2. The second function calls `make_atomaa_masks(...)`, a name that is
#      never defined anywhere in this chunk (NameError at call time).
#   3. The first function binds every computed mapping/mask to the throwaway
#      local `UpperCAmelCase_` instead of storing it into the `protein` dict,
#      so `return protein` (implied by the signature) returns the input
#      unchanged and all work is discarded.
#   4. `torch.intaa` / `torch.floataa` are not valid torch dtypes — presumably
#      `torch.int64` / `torch.float32` before mangling; verify upstream.
from typing import Dict import numpy as np import torch from . import residue_constants as rc from .tensor_utils import tensor_tree_map, tree_map def A ( __UpperCAmelCase ) -> Dict[str, torch.Tensor]: '''simple docstring''' UpperCAmelCase_ = [] UpperCAmelCase_ = [] UpperCAmelCase_ = [] for rt in rc.restypes: UpperCAmelCase_ = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]] restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] ) UpperCAmelCase_ = {name: i for i, name in enumerate(__UpperCAmelCase )} restype_atomaa_to_atomaa_list.append( [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] ) restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] ) # Add dummy mapping for restype 'UNK' restype_atomaa_to_atomaa_list.append([0] * 14 ) restype_atomaa_to_atomaa_list.append([0] * 37 ) restype_atomaa_mask_list.append([0.0] * 14 ) UpperCAmelCase_ = torch.tensor( __UpperCAmelCase , dtype=torch.intaa , device=protein['''aatype'''].device , ) UpperCAmelCase_ = torch.tensor( __UpperCAmelCase , dtype=torch.intaa , device=protein['''aatype'''].device , ) UpperCAmelCase_ = torch.tensor( __UpperCAmelCase , dtype=torch.floataa , device=protein['''aatype'''].device , ) UpperCAmelCase_ = protein['''aatype'''].to(torch.long ) # create the mapping for (residx, atom14) --> atom37, i.e. 
an array # with shape (num_res, 14) containing the atom37 indices for this protein UpperCAmelCase_ = restype_atomaa_to_atomaa[protein_aatype] UpperCAmelCase_ = restype_atomaa_mask[protein_aatype] UpperCAmelCase_ = residx_atomaa_mask UpperCAmelCase_ = residx_atomaa_to_atomaa.long() # create the gather indices for mapping back UpperCAmelCase_ = restype_atomaa_to_atomaa[protein_aatype] UpperCAmelCase_ = residx_atomaa_to_atomaa.long() # create the corresponding mask UpperCAmelCase_ = torch.zeros([21, 37] , dtype=torch.floataa , device=protein['''aatype'''].device ) for restype, restype_letter in enumerate(rc.restypes ): UpperCAmelCase_ = rc.restype_atoa[restype_letter] UpperCAmelCase_ = rc.residue_atoms[restype_name] for atom_name in atom_names: UpperCAmelCase_ = rc.atom_order[atom_name] UpperCAmelCase_ = 1 UpperCAmelCase_ = restype_atomaa_mask[protein_aatype] UpperCAmelCase_ = residx_atomaa_mask return protein def A ( __UpperCAmelCase ) -> Dict[str, np.ndarray]: '''simple docstring''' UpperCAmelCase_ = tree_map(lambda __UpperCAmelCase : torch.tensor(__UpperCAmelCase , device=batch['''aatype'''].device ) , __UpperCAmelCase , np.ndarray ) UpperCAmelCase_ = tensor_tree_map(lambda __UpperCAmelCase : np.array(__UpperCAmelCase ) , make_atomaa_masks(__UpperCAmelCase ) ) return out
344
1
"""BLIP model configurations: text tower, vision tower, and the combined model."""

import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
    "Salesforce/blip-vqa-capfit-large": (
        "https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"
    ),
    "Salesforce/blip-image-captioning-base": (
        "https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"
    ),
    "Salesforce/blip-image-captioning-large": (
        "https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"
    ),
    "Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
    "Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
    "Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json",
    "Salesforce/blip-itm-large-flikr": (
        "https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"
    ),
}


class BlipTextConfig(PretrainedConfig):
    """Configuration for the BLIP text encoder/decoder."""

    model_type = "blip_text_model"

    def __init__(
        self,
        vocab_size=30524,
        hidden_size=768,
        encoder_hidden_size=768,
        intermediate_size=3072,
        projection_dim=768,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=512,
        hidden_act="gelu",
        layer_norm_eps=1e-12,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        bos_token_id=30522,
        eos_token_id=2,
        pad_token_id=0,
        sep_token_id=102,
        is_decoder=True,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load this sub-config from a pretrained checkpoint name or path."""
        cls._set_token_in_kwargs(kwargs)

        # `get_config_dict` returns a (config_dict, remaining_kwargs) tuple; the
        # mangled version bound the tuple to a single name and then indexed it
        # as if it were the dict.
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BlipVisionConfig(PretrainedConfig):
    """Configuration for the BLIP vision (ViT-style) encoder."""

    model_type = "blip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        image_size=384,
        patch_size=16,
        hidden_act="gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=1e-10,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load this sub-config from a pretrained checkpoint name or path."""
        cls._set_token_in_kwargs(kwargs)

        # See BlipTextConfig.from_pretrained: unpack the (dict, kwargs) tuple.
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BlipConfig(PretrainedConfig):
    """Composite configuration holding a BlipTextConfig and a BlipVisionConfig."""

    model_type = "blip"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        image_text_hidden_size=256,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.")

        # The mangled version expanded `**kwargs` here rather than the
        # sub-config dicts, so the provided configs were silently ignored.
        self.text_config = BlipTextConfig(**text_config)
        self.vision_config = BlipVisionConfig(**vision_config)

        # Keep the text tower's cross-attention width in sync with the vision
        # tower's hidden size.
        self.text_config.encoder_hidden_size = self.vision_config.hidden_size

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size

    @classmethod
    def from_text_vision_configs(cls, text_config: BlipTextConfig, vision_config: BlipVisionConfig, **kwargs):
        """Instantiate a BlipConfig from separate text and vision configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding both nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
194
"""simple docstring""" import math from numpy import inf from scipy.integrate import quad def lowerCamelCase_ (UpperCamelCase__ : float ): if num <= 0: raise ValueError('''math domain error''' ) return quad(UpperCamelCase__ , 0 , UpperCamelCase__ , args=(UpperCamelCase__) )[0] def lowerCamelCase_ (UpperCamelCase__ : float , UpperCamelCase__ : float ): return math.pow(UpperCamelCase__ , z - 1 ) * math.exp(-x ) if __name__ == "__main__": from doctest import testmod testmod()
263
0
"""X-MOD model configuration (XmodConfig and its ONNX export config)."""

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
    "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
    "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
    "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
    "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
    "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
    "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
    "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
    "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}


class XmodConfig(PretrainedConfig):
    """Configuration for an X-MOD model.

    The mangled version declared every __init__ parameter with the same name
    (a SyntaxError); parameter names are restored from the attribute
    assignments and their default values.
    """

    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        # Stored as a list so the config serializes cleanly.
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    """ONNX export configuration for X-MOD."""

    # Must be the `inputs` property for OnnxConfig; the mangled version used
    # an arbitrary method name, which left the abstract property unimplemented.
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
263
"""simple docstring""" UpperCamelCase : Union[str, Any] = [ [0, 1_6, 1_3, 0, 0, 0], [0, 0, 1_0, 1_2, 0, 0], [0, 4, 0, 0, 1_4, 0], [0, 0, 9, 0, 0, 2_0], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def A ( snake_case :Dict , snake_case :Tuple , snake_case :str , snake_case :Optional[int] ) -> Union[str, Any]: # Return True if there is node that has not iterated. __UpperCamelCase = [False] * len(snake_case ) __UpperCamelCase = [s] __UpperCamelCase = True while queue: __UpperCamelCase = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(snake_case ) __UpperCamelCase = True __UpperCamelCase = u return visited[t] def A ( snake_case :int , snake_case :Any , snake_case :Union[str, Any] ) -> Optional[int]: __UpperCamelCase = [-1] * (len(snake_case )) __UpperCamelCase = 0 __UpperCamelCase = [] __UpperCamelCase = [i[:] for i in graph] # Record original cut, copy. while bfs(snake_case , snake_case , snake_case , snake_case ): __UpperCamelCase = float('Inf' ) __UpperCamelCase = sink while s != source: # Find the minimum value in select path __UpperCamelCase = min(snake_case , graph[parent[s]][s] ) __UpperCamelCase = parent[s] max_flow += path_flow __UpperCamelCase = sink while v != source: __UpperCamelCase = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow __UpperCamelCase = parent[v] for i in range(len(snake_case ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
263
1
# NOTE(review): machine-mangled chunk (whitespace collapsed, identifiers
# rewritten). It is an `accelerate` state-checkpointing test module: a linear
# toy model (DummyModel), synthetic dataloaders, a train() helper, a
# unittest.TestCase exercising Accelerator.save_state/load_state (total_limit
# pruning, explicit paths, automatic checkpoint naming, scheduler state,
# register_for_checkpointing validation, multi-GPU via torchrun), and a
# __main__ script validating optimizer map_location handling.
# Left byte-identical: the save/load/seed interleaving is order-sensitive and
# the heavy tuple (un)packing (`((UpperCAmelCase_) , (UpperCAmelCase_))`)
# makes which value feeds which assertion ambiguous — a restyle could not be
# confidently behavior-preserving. Known issues to confirm against the
# original upstream test file before any rewrite:
#   - every local is bound to the same `UpperCAmelCase_` name, so later
#     assertions (assertEqual of models/optimizers before vs after reload)
#     compare the wrong values as written;
#   - `UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = batch` style duplicate
#     targets discard the first element of each unpacked pair.
import inspect import logging import os import random import shutil import tempfile import unittest import pytest import torch from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_cuda from accelerate.utils import ProjectConfiguration, set_seed __UpperCAmelCase = logging.getLogger(__name__) def lowercase__ ( __snake_case : List[Any]=2 , __snake_case : Union[str, Any]=3 , __snake_case : Any=16 , __snake_case : int = 10 , __snake_case : int = 2 ): '''simple docstring''' def get_dataset(__snake_case : Optional[Any] ): UpperCAmelCase_ : Optional[Any] = torch.randn(batch_size * n_batches , 1 ) return TensorDataset(__snake_case , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) ) UpperCAmelCase_ : Any = get_dataset(__snake_case ) UpperCAmelCase_ : str = get_dataset(__snake_case ) UpperCAmelCase_ : int = DataLoader(__snake_case , shuffle=__snake_case , batch_size=__snake_case , num_workers=4 ) UpperCAmelCase_ : int = DataLoader(__snake_case , shuffle=__snake_case , batch_size=__snake_case , num_workers=4 ) return (train_dataloader, valid_dataloader) def lowercase__ ( __snake_case : Optional[int] , __snake_case : str , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : Any , __snake_case : Tuple=None ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = [] for epoch in range(__snake_case ): # Train quickly model.train() for batch in dataloader: UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = batch UpperCAmelCase_ : List[Any] = model(__snake_case ) UpperCAmelCase_ : int = torch.nn.functional.mse_loss(__snake_case , __snake_case ) accelerator.backward(__snake_case ) optimizer.step() optimizer.zero_grad() rands.append(random.random() ) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class lowerCamelCase (nn.Module ): '''simple docstring''' def __init__( self ) -> Optional[Any]: 
super().__init__() UpperCAmelCase_ : List[Any] = nn.Parameter(torch.randn(1 ) ) UpperCAmelCase_ : Optional[int] = nn.Parameter(torch.randn(1 ) ) def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[Any]: return x * self.a + self.b class lowerCamelCase (unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self ) -> Dict: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCAmelCase_ : Tuple = DummyModel() UpperCAmelCase_ : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = dummy_dataloaders() UpperCAmelCase_ : Optional[int] = ProjectConfiguration(total_limit=1 , project_dir=_UpperCamelCase , automatic_checkpoint_naming=_UpperCamelCase ) # Train baseline UpperCAmelCase_ : Dict = Accelerator(project_config=_UpperCamelCase ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = accelerator.prepare( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Save initial accelerator.save_state() # Save second state accelerator.save_state() self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 ) def __UpperCAmelCase ( self ) -> int: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCAmelCase_ : Optional[Any] = DummyModel() UpperCAmelCase_ : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders() # Train baseline UpperCAmelCase_ : Tuple = Accelerator() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = accelerator.prepare( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Save initial UpperCAmelCase_ : Any = os.path.join(_UpperCamelCase , 'initial' ) accelerator.save_state(_UpperCamelCase ) ((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item() UpperCAmelCase_ : Dict = optimizer.state_dict() UpperCAmelCase_ : Union[str, Any] = train(3 , 
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) ((UpperCAmelCase_) , (UpperCAmelCase_)) : Union[str, Any] = model.a.item(), model.b.item() UpperCAmelCase_ : Any = optimizer.state_dict() # Train partially set_seed(4_2 ) UpperCAmelCase_ : int = DummyModel() UpperCAmelCase_ : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCAmelCase_ , UpperCAmelCase_ : str = dummy_dataloaders() UpperCAmelCase_ : Optional[Any] = Accelerator() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = accelerator.prepare( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) accelerator.load_state(_UpperCamelCase ) ((UpperCAmelCase_) , (UpperCAmelCase_)) : List[str] = model.a.item(), model.b.item() UpperCAmelCase_ : Optional[Any] = optimizer.state_dict() self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ : Dict = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Save everything UpperCAmelCase_ : Union[str, Any] = os.path.join(_UpperCamelCase , 'checkpoint' ) accelerator.save_state(_UpperCamelCase ) # Load everything back in and make sure all states work accelerator.load_state(_UpperCamelCase ) test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) ((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[Any] = model.a.item(), model.b.item() UpperCAmelCase_ : Union[str, Any] = optimizer.state_dict() self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) def __UpperCAmelCase ( self ) -> int: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCAmelCase_ : Tuple = DummyModel() UpperCAmelCase_ : Optional[int] = torch.optim.Adam(params=model.parameters() 
, lr=1E-3 ) UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dummy_dataloaders() UpperCAmelCase_ : Any = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase ) # Train baseline UpperCAmelCase_ : str = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Save initial accelerator.save_state() ((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item() UpperCAmelCase_ : Optional[int] = optimizer.state_dict() UpperCAmelCase_ : Optional[Any] = train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) ((UpperCAmelCase_) , (UpperCAmelCase_)) : Tuple = model.a.item(), model.b.item() UpperCAmelCase_ : Optional[int] = optimizer.state_dict() # Train partially set_seed(4_2 ) UpperCAmelCase_ : Any = DummyModel() UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = dummy_dataloaders() UpperCAmelCase_ : Tuple = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_UpperCamelCase ) UpperCAmelCase_ : List[Any] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) ) ((UpperCAmelCase_) , (UpperCAmelCase_)) : str = model.a.item(), model.b.item() UpperCAmelCase_ : List[Any] = optimizer.state_dict() self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ : Union[str, Any] = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , 
_UpperCamelCase ) # Save everything accelerator.save_state() # Load everything back in and make sure all states work accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_1' ) ) test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) ((UpperCAmelCase_) , (UpperCAmelCase_)) : List[Any] = model.a.item(), model.b.item() UpperCAmelCase_ : Dict = optimizer.state_dict() self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) def __UpperCAmelCase ( self ) -> Dict: UpperCAmelCase_ : Optional[Any] = torch.tensor([1, 2, 3] ) UpperCAmelCase_ : Any = torch.tensor([2, 3, 4] ) UpperCAmelCase_ : Union[str, Any] = DummyModel() UpperCAmelCase_ : List[str] = torch.optim.Adam(net.parameters() ) UpperCAmelCase_ : Any = Accelerator() with self.assertRaises(_UpperCamelCase ) as ve: accelerator.register_for_checkpointing(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ : Optional[int] = str(ve.exception ) self.assertTrue('Item at index 0' in message ) self.assertTrue('Item at index 1' in message ) self.assertFalse('Item at index 2' in message ) self.assertFalse('Item at index 3' in message ) def __UpperCAmelCase ( self ) -> int: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCAmelCase_ : int = DummyModel() UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCAmelCase_ : Dict = torch.optim.lr_scheduler.StepLR(_UpperCamelCase , step_size=1 , gamma=0.99 ) UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders() UpperCAmelCase_ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase ) # Train baseline UpperCAmelCase_ : Tuple = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , 
UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Save initial accelerator.save_state() UpperCAmelCase_ : Dict = scheduler.state_dict() train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) self.assertNotEqual(_UpperCamelCase , scheduler.state_dict() ) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) ) self.assertEqual(_UpperCamelCase , scheduler.state_dict() ) def __UpperCAmelCase ( self ) -> Dict: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) UpperCAmelCase_ : Optional[int] = DummyModel() UpperCAmelCase_ : Dict = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase , total_limit=2 ) # Train baseline UpperCAmelCase_ : Optional[int] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase ) UpperCAmelCase_ : str = accelerator.prepare(_UpperCamelCase ) # Save 3 states: for _ in range(1_1 ): accelerator.save_state() self.assertTrue(not os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_9' ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_10' ) ) ) @require_cuda def __UpperCAmelCase ( self ) -> str: UpperCAmelCase_ : List[str] = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )] execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() ) if __name__ == "__main__": __UpperCAmelCase = '/tmp/accelerate/state_checkpointing' __UpperCAmelCase = DummyModel() __UpperCAmelCase = torch.optim.Adam(params=model.parameters(), lr=1E-3) __UpperCAmelCase = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9) __UpperCAmelCase , __UpperCAmelCase = dummy_dataloaders() 
__UpperCAmelCase = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline __UpperCAmelCase = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no') if accelerator.process_index == 0: if os.path.exists(savedir): shutil.rmtree(savedir) os.makedirs(savedir) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(model, optimizer) train(3, model, train_dataloader, optimizer, accelerator, scheduler) # Check that the intial optimizer is loaded on the GPU for group in optimizer.param_groups: __UpperCAmelCase = group['params'][0].device break assert param_device.type == accelerator.device.type __UpperCAmelCase = model.cpu() accelerator.wait_for_everyone() accelerator.save_state() accelerator.wait_for_everyone() # Check CPU state accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu') for group in optimizer.param_groups: __UpperCAmelCase = group['params'][0].device break assert ( param_device.type == torch.device('cpu').type ), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}" # Check device state model.to(accelerator.device) accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device') for group in optimizer.param_groups: __UpperCAmelCase = group['params'][0].device break assert ( param_device.type == accelerator.device.type ), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}" # Check error with pytest.raises(TypeError, match='Unsupported optimizer map location passed'): accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid') accelerator.wait_for_everyone() if accelerator.process_index == 0: shutil.rmtree(savedir) 
accelerator.wait_for_everyone()
29
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING _lowercase: Union[str, Any] = logging.get_logger(__name__) @add_end_docstrings(lowerCAmelCase ) class _lowercase ( lowerCAmelCase ): """simple docstring""" def __init__(self , *lowerCamelCase_ , **lowerCamelCase_ ): """simple docstring""" super().__init__(*lowerCamelCase_ , **lowerCamelCase_ ) requires_backends(self , "vision" ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING ) def UpperCamelCase_ (self , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None ): """simple docstring""" a = {} a = {} if prompt is not None: a = prompt if generate_kwargs is not None: a = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: a = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter," " please use only one" ) a = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__(self , lowerCamelCase_ , **lowerCamelCase_ ): """simple docstring""" return super().__call__(lowerCamelCase_ , **lowerCamelCase_ ) def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_=None ): """simple docstring""" a = load_image(lowerCamelCase_ ) if prompt is not None: if not isinstance(lowerCamelCase_ , lowerCamelCase_ ): raise ValueError( F'''Received an invalid text input, got - {type(lowerCamelCase_ )} - but expected a single string. 
''' "Note also that one single text can be provided for conditional image to text generation." ) a = self.model.config.model_type if model_type == "git": a = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework ) a = self.tokenizer(text=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ).input_ids a = [self.tokenizer.cls_token_id] + input_ids a = torch.tensor(lowerCamelCase_ ).unsqueeze(0 ) model_inputs.update({"input_ids": input_ids} ) elif model_type == "pix2struct": a = self.image_processor(images=lowerCamelCase_ , header_text=lowerCamelCase_ , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation a = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework ) a = self.tokenizer(lowerCamelCase_ , return_tensors=self.framework ) model_inputs.update(lowerCamelCase_ ) else: raise ValueError(F'''Model type {model_type} does not support conditional text generation''' ) else: a = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: a = None return model_inputs def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_=None ): """simple docstring""" if ( "input_ids" in model_inputs and isinstance(model_inputs["input_ids"] , lowerCamelCase_ ) and all(x is None for x in model_inputs["input_ids"] ) ): a = None if generate_kwargs is None: a = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. 
a = model_inputs.pop(self.model.main_input_name ) a = self.model.generate(lowerCamelCase_ , **lowerCamelCase_ , **lowerCamelCase_ ) return model_outputs def UpperCamelCase_ (self , lowerCamelCase_ ): """simple docstring""" a = [] for output_ids in model_outputs: a = { "generated_text": self.tokenizer.decode( lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ , ) } records.append(lowerCamelCase_ ) return records
227
0
"""simple docstring""" from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
53
"""simple docstring""" import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ASTConfig from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_torchaudio_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ASTForAudioClassification, ASTModel from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_torchaudio_available(): import torchaudio from transformers import ASTFeatureExtractor class a__ : def __init__( self : Union[str, Any], lowerCAmelCase : Any, lowerCAmelCase : Tuple=13, lowerCAmelCase : List[Any]=2, lowerCAmelCase : Tuple=24, lowerCAmelCase : Any=16, lowerCAmelCase : Optional[Any]=True, lowerCAmelCase : Tuple=True, lowerCAmelCase : Optional[int]=32, lowerCAmelCase : Optional[int]=5, lowerCAmelCase : Optional[int]=4, lowerCAmelCase : Optional[int]=37, lowerCAmelCase : Tuple="gelu", lowerCAmelCase : str=0.1, lowerCAmelCase : Tuple=0.1, lowerCAmelCase : List[Any]=10, lowerCAmelCase : List[Any]=0.02, lowerCAmelCase : List[str]=None, lowerCAmelCase : Any=2, lowerCAmelCase : str=2, ) -> Union[str, Any]: lowercase : str = parent lowercase : Optional[int] = batch_size lowercase : str = patch_size lowercase : List[Any] = max_length lowercase : Optional[Any] = num_mel_bins lowercase : int = is_training lowercase : Dict = use_labels lowercase : List[str] = hidden_size lowercase : str = num_hidden_layers lowercase : Any = num_attention_heads lowercase : List[str] = intermediate_size lowercase : int = hidden_act lowercase : Optional[Any] = hidden_dropout_prob lowercase : 
Optional[Any] = attention_probs_dropout_prob lowercase : int = type_sequence_label_size lowercase : Optional[int] = initializer_range lowercase : int = scope lowercase : int = frequency_stride lowercase : Dict = time_stride # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) lowercase : Tuple = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1 lowercase : Dict = (self.max_length - self.patch_size) // self.time_stride + 1 lowercase : Any = frequency_out_dimension * time_out_dimension lowercase : List[str] = num_patches + 2 def lowercase ( self : int ) -> Optional[int]: lowercase : List[Any] = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] ) lowercase : List[Any] = None if self.use_labels: lowercase : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size ) lowercase : str = self.get_config() return config, input_values, labels def lowercase ( self : List[str] ) -> Any: return ASTConfig( patch_size=self.patch_size, max_length=self.max_length, num_mel_bins=self.num_mel_bins, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=lowerCAmelCase, initializer_range=self.initializer_range, frequency_stride=self.frequency_stride, time_stride=self.time_stride, ) def lowercase ( self : str, lowerCAmelCase : List[Any], lowerCAmelCase : Optional[Any], lowerCAmelCase : Union[str, Any] ) -> Optional[int]: lowercase : Any = ASTModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowercase : Any = model(lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase ( self : Any ) -> Tuple: lowercase : List[Any] = 
self.prepare_config_and_inputs() ( ( lowercase ) , ( lowercase ) , ( lowercase ) , ) : Dict = config_and_inputs lowercase : Union[str, Any] = {'input_values': input_values} return config, inputs_dict @require_torch class a__ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, unittest.TestCase ): _lowerCamelCase = ( ( ASTModel, ASTForAudioClassification, ) if is_torch_available() else () ) _lowerCamelCase = ( {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel} if is_torch_available() else {} ) _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False def lowercase ( self : Any, lowerCAmelCase : Any, lowerCAmelCase : Tuple, lowerCAmelCase : Dict, lowerCAmelCase : List[str], lowerCAmelCase : int ) -> Tuple: if pipeline_test_casse_name == "AudioClassificationPipelineTests": return True return False def lowercase ( self : Optional[Any] ) -> Dict: lowercase : List[Any] = ASTModelTester(self ) lowercase : Any = ConfigTester(self, config_class=lowerCAmelCase, has_text_modality=lowerCAmelCase, hidden_size=37 ) def lowercase ( self : Tuple ) -> Optional[int]: self.config_tester.run_common_tests() @unittest.skip(reason='AST does not use inputs_embeds' ) def lowercase ( self : Tuple ) -> List[Any]: pass def lowercase ( self : Union[str, Any] ) -> List[str]: lowercase , lowercase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase : Optional[Any] = model_class(lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings(), (nn.Module) ) lowercase : int = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCAmelCase, nn.Linear ) ) def lowercase ( self : Union[str, Any] ) -> Optional[Any]: lowercase , lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase : Optional[int] = model_class(lowerCAmelCase ) lowercase : Optional[Any] = 
inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase : List[Any] = [*signature.parameters.keys()] lowercase : str = ['input_values'] self.assertListEqual(arg_names[:1], lowerCAmelCase ) def lowercase ( self : Optional[int] ) -> Tuple: lowercase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase ) @slow def lowercase ( self : List[str] ) -> Optional[Any]: for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : Dict = ASTModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) def lowercase__ ( ) -> Any: '''simple docstring''' lowercase : Tuple = hf_hub_download( repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' ) lowercase , lowercase : List[str] = torchaudio.load(_UpperCAmelCase ) return audio, sampling_rate @require_torch @require_torchaudio class a__ ( unittest.TestCase ): @cached_property def lowercase ( self : Union[str, Any] ) -> Optional[int]: return ( ASTFeatureExtractor.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' ) if is_torchaudio_available() else None ) @slow def lowercase ( self : Any ) -> Optional[Any]: lowercase : List[str] = self.default_feature_extractor lowercase : Tuple = ASTForAudioClassification.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' ).to(lowerCAmelCase ) lowercase : List[str] = self.default_feature_extractor lowercase , lowercase : Optional[int] = prepare_audio() lowercase : List[str] = audio.squeeze().numpy() lowercase : List[Any] = feature_extractor(lowerCAmelCase, sampling_rate=lowerCAmelCase, return_tensors='pt' ).to(lowerCAmelCase ) # forward pass with torch.no_grad(): lowercase : List[Any] = model(**lowerCAmelCase ) # verify the logits lowercase : Union[str, Any] = torch.Size((1, 527) ) self.assertEqual(outputs.logits.shape, lowerCAmelCase ) lowercase : 
Any = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3], lowerCAmelCase, atol=1e-4 ) )
53
1
"""simple docstring""" import logging import math import os from dataclasses import dataclass, field from glob import glob from typing import Optional from torch.utils.data import ConcatDataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_WITH_LM_HEAD_MAPPING, AutoConfig, AutoModelWithLMHead, AutoTokenizer, DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForWholeWordMask, HfArgumentParser, LineByLineTextDataset, LineByLineWithRefDataset, PreTrainedTokenizer, TextDataset, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process UpperCAmelCase__ = logging.getLogger(__name__) UpperCAmelCase__ = list(MODEL_WITH_LM_HEAD_MAPPING.keys()) UpperCAmelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class a : _snake_case : Optional[str] = field( default=lowerCAmelCase_ , metadata={ 'help': ( 'The model checkpoint for weights initialization. Leave None if you want to train a model from' ' scratch.' ) } , ) _snake_case : Optional[str] = field( default=lowerCAmelCase_ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(lowerCAmelCase_ )} , ) _snake_case : Optional[str] = field( default=lowerCAmelCase_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) _snake_case : Optional[str] = field( default=lowerCAmelCase_ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) _snake_case : Optional[str] = field( default=lowerCAmelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) @dataclass class a : _snake_case : Optional[str] = field( default=lowerCAmelCase_ , metadata={'help': 'The input training data file (a text file).'} ) _snake_case : Optional[str] = field( default=lowerCAmelCase_ , metadata={ 'help': ( 'The input training data files (multiple files in glob format). 
' 'Very often splitting large files to smaller files can prevent tokenizer going out of memory' ) } , ) _snake_case : Optional[str] = field( default=lowerCAmelCase_ , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , ) _snake_case : Optional[str] = field( default=lowerCAmelCase_ , metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'} , ) _snake_case : Optional[str] = field( default=lowerCAmelCase_ , metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'} , ) _snake_case : bool = field( default=lowerCAmelCase_ , metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'} , ) _snake_case : bool = field( default=lowerCAmelCase_ , metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'} ) _snake_case : bool = field(default=lowerCAmelCase_ , metadata={'help': 'Whether ot not to use whole word mask.'} ) _snake_case : float = field( default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} ) _snake_case : float = field( default=1 / 6 , metadata={ 'help': ( 'Ratio of length of a span of masked tokens to surrounding context length for permutation language' ' modeling.' ) } , ) _snake_case : int = field( default=5 , metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'} ) _snake_case : int = field( default=-1 , metadata={ 'help': ( 'Optional input sequence length after tokenization.' 'The training dataset will be truncated in block of this size for training.' 'Default to the model max input length for single sentence inputs (take into account special tokens).' 
) } , ) _snake_case : bool = field( default=lowerCAmelCase_ , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase = False ,lowercase = None ,): """simple docstring""" def _dataset(lowercase ,lowercase=None ): if args.line_by_line: if ref_path is not None: if not args.whole_word_mask or not args.mlm: raise ValueError("""You need to set world whole masking and mlm to True for Chinese Whole Word Mask""" ) return LineByLineWithRefDataset( tokenizer=lowercase ,file_path=lowercase ,block_size=args.block_size ,ref_path=lowercase ,) return LineByLineTextDataset(tokenizer=lowercase ,file_path=lowercase ,block_size=args.block_size ) else: return TextDataset( tokenizer=lowercase ,file_path=lowercase ,block_size=args.block_size ,overwrite_cache=args.overwrite_cache ,cache_dir=lowercase ,) if evaluate: return _dataset(args.eval_data_file ,args.eval_ref_file ) elif args.train_data_files: return ConcatDataset([_dataset(lowercase ) for f in glob(args.train_data_files )] ) else: return _dataset(args.train_data_file ,args.train_ref_file ) def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses() if data_args.eval_data_file is None and training_args.do_eval: raise ValueError( """Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """ """or remove the --do_eval argument.""" ) if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. 
Use''' """ --overwrite_output_dir to overcome.""" ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1 ) ,training_args.fpaa ,) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("""Training/evaluation parameters %s""" ,lowercase ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. if model_args.config_name: _UpperCAmelCase = AutoConfig.from_pretrained(model_args.config_name ,cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: _UpperCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path ,cache_dir=model_args.cache_dir ) else: _UpperCAmelCase = CONFIG_MAPPING[model_args.model_type]() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.tokenizer_name: _UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name ,cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: _UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.model_name_or_path ,cache_dir=model_args.cache_dir ) else: raise ValueError( """You are instantiating a new tokenizer from scratch. 
This is not supported, but you can do it from another""" """ script, save it,and load it from here, using --tokenizer_name""" ) if model_args.model_name_or_path: _UpperCAmelCase = AutoModelWithLMHead.from_pretrained( model_args.model_name_or_path ,from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) ,config=lowercase ,cache_dir=model_args.cache_dir ,) else: logger.info("""Training new model from scratch""" ) _UpperCAmelCase = AutoModelWithLMHead.from_config(lowercase ) model.resize_token_embeddings(len(lowercase ) ) if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm: raise ValueError( """BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the""" """--mlm flag (masked language modeling).""" ) if data_args.block_size <= 0: _UpperCAmelCase = tokenizer.max_len # Our input block size will be the max possible for the model else: _UpperCAmelCase = min(data_args.block_size ,tokenizer.max_len ) # Get datasets _UpperCAmelCase = ( get_dataset(lowercase ,tokenizer=lowercase ,cache_dir=model_args.cache_dir ) if training_args.do_train else None ) _UpperCAmelCase = ( get_dataset(lowercase ,tokenizer=lowercase ,evaluate=lowercase ,cache_dir=model_args.cache_dir ) if training_args.do_eval else None ) if config.model_type == "xlnet": _UpperCAmelCase = DataCollatorForPermutationLanguageModeling( tokenizer=lowercase ,plm_probability=data_args.plm_probability ,max_span_length=data_args.max_span_length ,) else: if data_args.mlm and data_args.whole_word_mask: _UpperCAmelCase = DataCollatorForWholeWordMask( tokenizer=lowercase ,mlm_probability=data_args.mlm_probability ) else: _UpperCAmelCase = DataCollatorForLanguageModeling( tokenizer=lowercase ,mlm=data_args.mlm ,mlm_probability=data_args.mlm_probability ) # Initialize our Trainer _UpperCAmelCase = Trainer( model=lowercase ,args=lowercase ,data_collator=lowercase ,train_dataset=lowercase ,eval_dataset=lowercase ,prediction_loss_only=lowercase ,) 
# Training if training_args.do_train: _UpperCAmelCase = ( model_args.model_name_or_path if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ) else None ) trainer.train(model_path=lowercase ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation _UpperCAmelCase = {} if training_args.do_eval: logger.info("""*** Evaluate ***""" ) _UpperCAmelCase = trainer.evaluate() _UpperCAmelCase = math.exp(eval_output["""eval_loss"""] ) _UpperCAmelCase = {"""perplexity""": perplexity} _UpperCAmelCase = os.path.join(training_args.output_dir ,"""eval_results_lm.txt""" ) if trainer.is_world_master(): with open(lowercase ,"""w""" ) as writer: logger.info("""***** Eval results *****""" ) for key in sorted(result.keys() ): logger.info(""" %s = %s""" ,lowercase ,str(result[key] ) ) writer.write("""%s = %s\n""" % (key, str(result[key] )) ) results.update(lowercase ) return results def __UpperCAmelCase ( lowercase ): """simple docstring""" main() if __name__ == "__main__": main()
289
'''simple docstring''' import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin lowercase__ : Dict = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right lowercase__ : List[Any] = 25_00_04 lowercase__ : str = 25_00_20 @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" _snake_case : Optional[Any] = MBartTokenizer _snake_case : Tuple = MBartTokenizerFast _snake_case : List[str] = True _snake_case : Optional[Any] = True def snake_case__ ( self : Any ) -> Optional[int]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing _UpperCamelCase = MBartTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case__ ( self : str ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = MBartTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ ) _UpperCamelCase = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(lowerCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) _UpperCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowerCAmelCase__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', 
SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) _UpperCamelCase = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) self.assertListEqual( lowerCAmelCase__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) _UpperCamelCase = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ ) self.assertListEqual( lowerCAmelCase__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) def snake_case__ ( self : Any ) -> Dict: '''simple docstring''' if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return _UpperCamelCase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): _UpperCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) _UpperCamelCase = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) _UpperCamelCase = tempfile.mkdtemp() _UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ ) _UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) _UpperCamelCase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(lowerCAmelCase__ , 
lowerCAmelCase__ ) # Checks everything loads correctly in the same way _UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ ) _UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(lowerCAmelCase__ ) # Save tokenizer rust, legacy_format=True _UpperCamelCase = tempfile.mkdtemp() _UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ ) _UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ ) # Checks it save with the same files self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ ) # Checks everything loads correctly in the same way _UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ ) _UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) shutil.rmtree(lowerCAmelCase__ ) # Save tokenizer rust, legacy_format=False _UpperCamelCase = tempfile.mkdtemp() _UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ ) _UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way _UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ ) _UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCAmelCase__ , 
lowerCAmelCase__ ) ) shutil.rmtree(lowerCAmelCase__ )


@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
    """Integration tests for the slow MBartTokenizer against facebook/mbart-large-en-ro.

    NOTE(review): this file is machine-mangled — the four class attributes were all
    renamed to `_snake_case` (originally, judging from the references below:
    `checkpoint_name`, `src_text`, `tgt_text`, `expected_src_tokens`), and locals /
    call arguments were renamed to `_UpperCamelCase` / `lowerCAmelCase__`. Names such
    as `cls.checkpoint_name`, `self.src_text`, `generated_ids`, `ids`, `batch`,
    `targets`, `new_tok` are therefore undefined as written — restore the original
    identifiers before running.
    """

    # presumably `checkpoint_name` — referenced as cls.checkpoint_name below
    _snake_case : Dict = 'facebook/mbart-large-en-ro'
    # presumably `src_text` — English source sentences
    _snake_case : Dict = [
        ' UN Chief Says There Is No Military Solution in Syria',
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    # presumably `tgt_text` — Romanian reference translations
    _snake_case : List[Any] = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
        'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
    ]
    # presumably `expected_src_tokens` — token ids for src_text[0], ending in EOS + EN_CODE
    _snake_case : Union[str, Any] = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE]

    @classmethod
    def snake_case__ ( cls : List[str] ) -> List[str]:
        '''Load the tokenizer once for the whole test class (setUpClass-style hook).'''
        _UpperCamelCase = MBartTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
        _UpperCamelCase = 1
        return cls

    def snake_case__ ( self : Dict ) -> Union[str, Any]:
        '''Language-code tokens map onto the expected fairseq vocabulary ids.'''
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 250001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 250004 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 250020 )

    def snake_case__ ( self : Optional[Any] ) -> Optional[int]:
        '''Encoding the first source sentence yields the expected token ids.'''
        _UpperCamelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        # NOTE(review): the second argument was mangled; it should be the ids encoded above.
        self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )

    def snake_case__ ( self : str ) -> List[Any]:
        '''Decoding with skip_special_tokens drops the language code and EOS.'''
        self.assertIn(lowerCAmelCase__ , self.tokenizer.all_special_ids )
        # presumably `generated_ids` — a RO_CODE-prefixed generated sequence ending in EOS
        _UpperCamelCase = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        _UpperCamelCase = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
        _UpperCamelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__ )
        self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
        self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__ )

    def snake_case__ ( self : Any ) -> Union[str, Any]:
        '''Truncation keeps EOS + language code as the final two tokens.'''
        _UpperCamelCase = ['''this is gunna be a long sentence ''' * 20]
        assert isinstance(src_text[0] , lowerCAmelCase__ )
        _UpperCamelCase = 10
        _UpperCamelCase = self.tokenizer(lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ ).input_ids[0]
        self.assertEqual(ids[-2] , 2 )  # EOS id
        self.assertEqual(ids[-1] , lowerCAmelCase__ )
        self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )

    def snake_case__ ( self : List[Any] ) -> int:
        '''<mask> and ar_AR resolve to their reserved special-token ids.'''
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [250026, 250001] )

    def snake_case__ ( self : int ) -> Optional[int]:
        '''Save/reload round-trip preserves the fairseq token-id mapping.'''
        _UpperCamelCase = tempfile.mkdtemp()
        _UpperCamelCase = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(lowerCAmelCase__ )
        _UpperCamelCase = MBartTokenizer.from_pretrained(lowerCAmelCase__ )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase__ )

    @require_torch
    def snake_case__ ( self : Any ) -> List[Any]:
        '''Batched src/tgt encoding matches the fairseq reference layout.'''
        _UpperCamelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors='''pt''' )
        _UpperCamelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def snake_case__ ( self : Optional[Any] ) -> int:
        '''Truncated batch has the expected shape and suffix special tokens.'''
        _UpperCamelCase = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
        _UpperCamelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
        self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
        self.assertEqual((2, 14) , batch.input_ids.shape )
        self.assertEqual((2, 14) , batch.attention_mask.shape )
        _UpperCamelCase = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
        self.assertEqual(2 , batch.decoder_input_ids[0, -1] )  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )

    def snake_case__ ( self : Optional[Any] ) -> List[str]:
        '''Source and target can be truncated to different max lengths.'''
        _UpperCamelCase = self.tokenizer(self.src_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=3 , return_tensors='''pt''' )
        _UpperCamelCase = self.tokenizer(
            text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=10 , return_tensors='''pt''' )
        _UpperCamelCase = targets['''input_ids''']
        _UpperCamelCase = shift_tokens_right(lowerCAmelCase__ , self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )

    @require_torch
    def snake_case__ ( self : Tuple ) -> Tuple:
        '''_build_translation_inputs sets forced_bos_token_id for the target language.'''
        _UpperCamelCase = self.tokenizer._build_translation_inputs(
            '''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
        self.assertEqual(
            nested_simplify(lowerCAmelCase__ ) , {
                # A, test, EOS, en_XX
                '''input_ids''': [[62, 3034, 2, 250004]],
                '''attention_mask''': [[1, 1, 1, 1]],
                # ar_AR
                '''forced_bos_token_id''': 250001,
            } , )
324
0
'''Integration tests for the optimum BetterTransformer save/reload round-trip on a tiny T5.'''
import tempfile
import unittest

from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
    is_torch_available,
    require_optimum,
    require_torch,
    slow,
)


if is_torch_available():
    import torch


@require_torch
@require_optimum
@slow
class __magic_name__ ( unittest.TestCase ):
    """BetterTransformer conversion tests.

    NOTE(review): machine-mangled — every local was renamed to `_a` and every call
    argument to `_UpperCAmelCase`, so names such as `tokenizer`, `model`,
    `model_reloaded`, `tmpdirname` are referenced but never bound. Restore the
    original local names before running.
    """

    def __lowercase ( self : Tuple ):
        # Convert -> generate -> reverse -> save -> reload; reloaded model must be vanilla
        # and produce the same generation output.
        _a : str = 'hf-internal-testing/tiny-random-t5'
        _a : List[Any] = AutoTokenizer.from_pretrained(_UpperCAmelCase )
        _a : Dict = AutoModelForSeqaSeqLM.from_pretrained(_UpperCAmelCase )
        _a : Any = tokenizer('This is me' , return_tensors='pt' )
        _a : int = model.to_bettertransformer()
        self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
        _a : str = model.generate(**_UpperCAmelCase )
        _a : Optional[Any] = model.reverse_bettertransformer()
        self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(_UpperCAmelCase )
            _a : Tuple = AutoModelForSeqaSeqLM.from_pretrained(_UpperCAmelCase )
            self.assertFalse(
                any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
            _a : Optional[int] = model_reloaded.generate(**_UpperCAmelCase )
            self.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase ) )

    def __lowercase ( self : List[str] ):
        # Saving while still converted must raise; after reverse_bettertransformer it succeeds.
        _a : str = 'hf-internal-testing/tiny-random-t5'
        _a : int = AutoModelForSeqaSeqLM.from_pretrained(_UpperCAmelCase )
        _a : Dict = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(_UpperCAmelCase ):
                model.save_pretrained(_UpperCAmelCase )
            _a : str = model.reverse_bettertransformer()
            model.save_pretrained(_UpperCAmelCase )
107
'''Unit and integration tests for the DPT (Dense Prediction Transformer) models.

NOTE(review): machine-mangled file — both classes are named `__magic_name__`
(the first was presumably `DPTModelTester`, referenced by name below), every
parameter is `_UpperCAmelCase` (duplicate parameter names are a SyntaxError as
written), and locals are `_a`. Restore original identifiers before running.
'''
import inspect
import unittest

from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
    from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import DPTImageProcessor


class __magic_name__ :
    # Test-fixture factory: builds small DPT configs/inputs and shape-checks model outputs.
    def __init__( self : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : Tuple=32 , _UpperCAmelCase : Dict=16 , _UpperCAmelCase : Optional[Any]=3 , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : str=32 , _UpperCAmelCase : Tuple=4 , _UpperCAmelCase : List[Any]=[0, 1, 2, 3] , _UpperCAmelCase : List[str]=4 , _UpperCAmelCase : Any=37 , _UpperCAmelCase : List[Any]="gelu" , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : Optional[Any]=0.02 , _UpperCAmelCase : Optional[Any]=3 , _UpperCAmelCase : List[str]=[1, 384, 24, 24] , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : List[str]=None , ):
        # NOTE(review): the right-hand names below (parent, batch_size, ...) are the
        # original parameter names; the mangled signature no longer binds them.
        _a : int = parent
        _a : Optional[Any] = batch_size
        _a : str = image_size
        _a : str = patch_size
        _a : Any = num_channels
        _a : Optional[Any] = is_training
        _a : str = use_labels
        _a : Union[str, Any] = hidden_size
        _a : int = num_hidden_layers
        _a : str = backbone_out_indices
        _a : List[Any] = num_attention_heads
        _a : Any = intermediate_size
        _a : List[Any] = hidden_act
        _a : int = hidden_dropout_prob
        _a : Optional[int] = attention_probs_dropout_prob
        _a : Optional[int] = initializer_range
        _a : Tuple = num_labels
        _a : Tuple = backbone_featmap_shape
        _a : Optional[int] = scope
        _a : Dict = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        _a : str = (image_size // patch_size) ** 2
        _a : Optional[int] = num_patches + 1

    def __lowercase ( self : Dict ):
        # Random pixel values plus (optionally) per-pixel labels.
        _a : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        _a : Any = None
        if self.use_labels:
            _a : Union[str, Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        _a : int = self.get_config()
        return config, pixel_values, labels

    def __lowercase ( self : int ):
        # Hybrid DPT uses a BiT-style convolutional backbone config.
        _a : Dict = {
            'global_padding': 'same',
            'layer_type': 'bottleneck',
            'depths': [3, 4, 9],
            'out_features': ['stage1', 'stage2', 'stage3'],
            'embedding_dynamic_padding': True,
            'hidden_sizes': [96, 192, 384, 768],
            'num_groups': 2,
        }
        return DPTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=_UpperCAmelCase , backbone_featmap_shape=self.backbone_featmap_shape , )

    def __lowercase ( self : Any , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] ):
        # Base model: last_hidden_state must be (batch, seq_len, hidden).
        _a : Optional[Any] = DPTModel(config=_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        _a : List[Any] = model(_UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def __lowercase ( self : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Any ):
        # Depth head: predicted_depth must be (batch, H, W).
        _a : List[Any] = self.num_labels
        _a : Dict = DPTForDepthEstimation(_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        _a : str = model(_UpperCAmelCase )
        self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )

    def __lowercase ( self : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] ):
        # Segmentation head: logits must be (batch, num_labels, H, W).
        _a : Optional[int] = self.num_labels
        _a : Optional[int] = DPTForSemanticSegmentation(_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        _a : Optional[int] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )

    def __lowercase ( self : List[Any] ):
        _a : List[str] = self.prepare_config_and_inputs()
        _a , _a , _a : List[Any] = config_and_inputs
        _a : List[Any] = {'pixel_values': pixel_values}
        return config, inputs_dict


@require_torch
class __magic_name__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
    # Common model/pipeline test-suite entry point for the three DPT heads.
    lowerCAmelCase : Optional[int] = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    lowerCAmelCase : int = (
        {
            'depth-estimation': DPTForDepthEstimation,
            'feature-extraction': DPTModel,
            'image-segmentation': DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    lowerCAmelCase : Union[str, Any] = False
    lowerCAmelCase : Dict = False
    lowerCAmelCase : List[str] = False

    def __lowercase ( self : Union[str, Any] ):
        _a : Union[str, Any] = DPTModelTester(self )
        _a : int = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )

    def __lowercase ( self : Any ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='DPT does not use inputs_embeds' )
    def __lowercase ( self : Tuple ):
        pass

    def __lowercase ( self : List[str] ):
        # Input embeddings are a module; output embeddings are Linear or absent.
        _a , _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _a : Union[str, Any] = model_class(_UpperCAmelCase )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            _a : List[Any] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )

    def __lowercase ( self : Any ):
        # forward() must take pixel_values as its first argument.
        _a , _a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _a : Optional[int] = model_class(_UpperCAmelCase )
            _a : Tuple = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _a : Union[str, Any] = [*signature.parameters.keys()]
            _a : Optional[Any] = ['pixel_values']
            self.assertListEqual(arg_names[:1] , _UpperCAmelCase )

    def __lowercase ( self : str ):
        _a : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_UpperCAmelCase )

    def __lowercase ( self : int ):
        _a : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*_UpperCAmelCase )

    def __lowercase ( self : str ):
        _a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase )

    def __lowercase ( self : Union[str, Any] ):
        # Training smoke test: loss must backprop for trainable heads.
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            _a , _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
            _a : Tuple = True
            if model_class in get_values(_UpperCAmelCase ):
                continue
            _a : str = model_class(_UpperCAmelCase )
            model.to(_UpperCAmelCase )
            model.train()
            _a : Optional[int] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
            _a : Optional[Any] = model(**_UpperCAmelCase ).loss
            loss.backward()

    def __lowercase ( self : Union[str, Any] ):
        # Same smoke test with gradient checkpointing enabled.
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            _a , _a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
            _a : Optional[Any] = False
            _a : List[Any] = True
            if model_class in get_values(_UpperCAmelCase ) or not model_class.supports_gradient_checkpointing:
                continue
            _a : str = model_class(_UpperCAmelCase )
            model.to(_UpperCAmelCase )
            model.gradient_checkpointing_enable()
            model.train()
            _a : int = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
            _a : Optional[Any] = model(**_UpperCAmelCase ).loss
            loss.backward()

    def __lowercase ( self : str ):
        # With zero-init config every trainable (non-backbone) parameter mean is 0 or 1.
        _a , _a : str = self.model_tester.prepare_config_and_inputs_for_common()
        _a : List[str] = _config_zero_init(_UpperCAmelCase )
        for model_class in self.all_model_classes:
            _a : Tuple = model_class(config=_UpperCAmelCase )
            # Skip the check for the backbone
            _a : Any = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    _a : Union[str, Any] = [F"""{name}.{key}""" for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def __lowercase ( self : Optional[Any] ):
        pass

    @slow
    def __lowercase ( self : Union[str, Any] ):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            _a : List[Any] = DPTModel.from_pretrained(_UpperCAmelCase )
            self.assertIsNotNone(_UpperCAmelCase )

    def __lowercase ( self : Any ):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        _a , _a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        _a : Optional[Any] = 'add'
        with self.assertRaises(_UpperCAmelCase ):
            _a : List[str] = DPTForDepthEstimation(_UpperCAmelCase )


def __lowerCamelCase ( ) -> Optional[Any]:
    # Load the standard COCO cats test fixture image.
    _a : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image


@require_torch
@require_vision
@slow
class __magic_name__ ( unittest.TestCase ):
    # Integration test against the published Intel/dpt-hybrid-midas checkpoint.
    def __lowercase ( self : Any ):
        _a : Optional[Any] = DPTImageProcessor.from_pretrained('Intel/dpt-hybrid-midas' )
        _a : Union[str, Any] = DPTForDepthEstimation.from_pretrained('Intel/dpt-hybrid-midas' ).to(_UpperCAmelCase )
        _a : Optional[int] = prepare_img()
        _a : Optional[Any] = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
        # forward pass
        with torch.no_grad():
            _a : Tuple = model(**_UpperCAmelCase )
        _a : Union[str, Any] = outputs.predicted_depth
        # verify the predicted depth
        _a : str = torch.Size((1, 384, 384) )
        self.assertEqual(predicted_depth.shape , _UpperCAmelCase )
        _a : int = torch.tensor(
            [[[5.64_37, 5.61_46, 5.65_11], [5.43_71, 5.56_49, 5.59_58], [5.52_15, 5.51_84, 5.52_93]]] ).to(_UpperCAmelCase )
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , _UpperCAmelCase , atol=1E-4 ) )
107
1
# Lazy-import `__init__` for the XGLM model family: each optional backend
# (sentencepiece, tokenizers, torch, flax, tf) contributes its symbols only when
# its dependency is installed.
#
# NOTE(review): machine-mangled — every assignment target was renamed to `A_`,
# clobbering what were originally additions to a single `_import_structure`
# dict (e.g. `_import_structure["tokenization_xglm"] = [...]`). As written,
# `_import_structure` on the last line is undefined; restore the dict updates
# before running.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# presumably the initial `_import_structure` dict
A_ :Any = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}

# Slow (sentencepiece-based) tokenizer.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A_ :Any = ['''XGLMTokenizer''']

# Fast (tokenizers-based) tokenizer.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A_ :Optional[int] = ['''XGLMTokenizerFast''']

# PyTorch models.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A_ :Optional[Any] = [
        '''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''XGLMForCausalLM''',
        '''XGLMModel''',
        '''XGLMPreTrainedModel''',
    ]

# Flax models.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A_ :Tuple = [
        '''FlaxXGLMForCausalLM''',
        '''FlaxXGLMModel''',
        '''FlaxXGLMPreTrainedModel''',
    ]

# TensorFlow models.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A_ :Tuple = [
        '''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFXGLMForCausalLM''',
        '''TFXGLMModel''',
        '''TFXGLMPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Direct imports for static type checkers; mirrors the lazy structure above.
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy.
    A_ :Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
71
'''Integration test checking google/mt5-small loss against the original Mesh-TensorFlow score.'''
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device


if is_torch_available():
    from transformers import AutoModelForSeqaSeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase ):
    @slow
    def lowercase__ ( self : List[Any] ) -> str:
        """Per-token NLL of mt5-small must match the MTF reference within 1e-4.

        NOTE(review): machine-mangled — locals were renamed to `_lowerCAmelCase`
        and arguments to `__snake_case`, so `model`, `tokenizer`, `input_ids`,
        `labels`, `loss`, `mtf_score`, `EXPECTED_SCORE` (and the `torch_device`
        passed to `.to(...)`) are referenced but never bound as written.
        """
        _lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" , return_dict=__snake_case ).to(__snake_case )
        _lowerCAmelCase = AutoTokenizer.from_pretrained("""google/mt5-small""" )
        _lowerCAmelCase = tokenizer("""Hello there""" , return_tensors="""pt""" ).input_ids
        _lowerCAmelCase = tokenizer("""Hi I am""" , return_tensors="""pt""" ).input_ids
        _lowerCAmelCase = model(input_ids.to(__snake_case ) , labels=labels.to(__snake_case ) ).loss
        # Convert mean per-token loss into a summed sequence log-likelihood.
        _lowerCAmelCase = -(labels.shape[-1] * loss.item())
        _lowerCAmelCase = -84.91_27
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
70
0
"""simple docstring""" from sklearn.metrics import recall_score import datasets __UpperCamelCase : Optional[int] = ''' Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation: Recall = TP / (TP + FN) Where TP is the true positives and FN is the false negatives. ''' __UpperCamelCase : Union[str, Any] = ''' Args: - **predictions** (`list` of `int`): The predicted labels. - **references** (`list` of `int`): The ground truth labels. - **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None. - **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`. - **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`. - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary. - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives. - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. 
Note that it can result in an F-score that is not between precision and recall. - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). - **sample_weight** (`list` of `float`): Sample weights Defaults to `None`. - **zero_division** (): Sets the value to return when there is a zero division. Defaults to . - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised. - `0`: If there is a zero division, the return value is `0`. - `1`: If there is a zero division, the return value is `1`. Returns: - **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better. Examples: Example 1-A simple example with some errors >>> recall_metric = datasets.load_metric(\'recall\') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1]) >>> print(results) {\'recall\': 0.6666666666666666} Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`. >>> recall_metric = datasets.load_metric(\'recall\') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0) >>> print(results) {\'recall\': 0.5} Example 3-The same example as Example 1, but with `sample_weight` included. >>> recall_metric = datasets.load_metric(\'recall\') >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8] >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight) >>> print(results) {\'recall\': 0.55} Example 4-A multiclass example, using different averages. 
>>> recall_metric = datasets.load_metric(\'recall\') >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\') >>> print(results) {\'recall\': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\') >>> print(results) {\'recall\': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\') >>> print(results) {\'recall\': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {\'recall\': array([1., 0., 0.])} ''' __UpperCamelCase : List[str] = ''' @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. 
and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a ( datasets.Metric ): def UpperCamelCase__ ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('int32' ) ), 'references': datasets.Sequence(datasets.Value('int32' ) ), } if self.config_name == 'multilabel' else { 'predictions': datasets.Value('int32' ), 'references': datasets.Value('int32' ), } ) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'] , ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case=None , _snake_case=1 , _snake_case="binary" , _snake_case=None , _snake_case="warn" , ): """simple docstring""" lowerCAmelCase = recall_score( _snake_case , _snake_case , labels=_snake_case , pos_label=_snake_case , average=_snake_case , sample_weight=_snake_case , zero_division=_snake_case , ) return {"recall": float(_snake_case ) if score.size == 1 else score}
309
"""simple docstring""" from __future__ import annotations from decimal import Decimal from numpy import array def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[list[float]] ): lowerCAmelCase = Decimal # Check if the provided matrix has 2 rows and 2 columns # since this implementation only works for 2x2 matrices if len(_UpperCAmelCase ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2: # Calculate the determinant of the matrix lowerCAmelCase = float( d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) ) if determinant == 0: raise ValueError('This matrix has no inverse.' ) # Creates a copy of the matrix with swapped positions of the elements lowerCAmelCase = [[0.0, 0.0], [0.0, 0.0]] lowerCAmelCase ,lowerCAmelCase = matrix[1][1], matrix[0][0] lowerCAmelCase ,lowerCAmelCase = -matrix[1][0], -matrix[0][1] # Calculate the inverse of the matrix return [ [(float(d(_UpperCAmelCase ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix ] elif ( len(_UpperCAmelCase ) == 3 and len(matrix[0] ) == 3 and len(matrix[1] ) == 3 and len(matrix[2] ) == 3 ): # Calculate the determinant of the matrix using Sarrus rule lowerCAmelCase = float( ( (d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] )) + (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] )) + (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] )) ) - ( (d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] )) + (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] )) + (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] )) ) ) if determinant == 0: raise ValueError('This matrix has no inverse.' 
) # Creating cofactor matrix lowerCAmelCase = [ [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], ] lowerCAmelCase = (d(matrix[1][1] ) * d(matrix[2][2] )) - ( d(matrix[1][2] ) * d(matrix[2][1] ) ) lowerCAmelCase = -( (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] )) ) lowerCAmelCase = (d(matrix[1][0] ) * d(matrix[2][1] )) - ( d(matrix[1][1] ) * d(matrix[2][0] ) ) lowerCAmelCase = -( (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] )) ) lowerCAmelCase = (d(matrix[0][0] ) * d(matrix[2][2] )) - ( d(matrix[0][2] ) * d(matrix[2][0] ) ) lowerCAmelCase = -( (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] )) ) lowerCAmelCase = (d(matrix[0][1] ) * d(matrix[1][2] )) - ( d(matrix[0][2] ) * d(matrix[1][1] ) ) lowerCAmelCase = -( (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] )) ) lowerCAmelCase = (d(matrix[0][0] ) * d(matrix[1][1] )) - ( d(matrix[0][1] ) * d(matrix[1][0] ) ) # Transpose the cofactor matrix (Adjoint matrix) lowerCAmelCase = array(_UpperCAmelCase ) for i in range(3 ): for j in range(3 ): lowerCAmelCase = cofactor_matrix[j][i] # Inverse of the matrix using the formula (1/determinant) * adjoint matrix lowerCAmelCase = array(_UpperCAmelCase ) for i in range(3 ): for j in range(3 ): inverse_matrix[i][j] /= d(_UpperCAmelCase ) # Calculate the inverse of the matrix return [[float(d(_UpperCAmelCase ) ) or 0.0 for n in row] for row in inverse_matrix] raise ValueError('Please provide a matrix of size 2x2 or 3x3.' )
309
1
# Tests for the LDMTextToImagePipeline (fast dummy, slow GPU, nightly GPU).
#
# NOTE(review): machine-mangled — locals were renamed to `_lowerCAmelCase` and
# arguments to `__a` (one signature even repeats `__a`, a SyntaxError as
# written), so names like `unet`, `scheduler`, `vae`, `text_encoder`,
# `tokenizer`, `components`, `generator`, `inputs`, `pipe`, `image`,
# `image_slice`, `expected_slice`, `latents`, `max_diff` are referenced but
# never bound. Restore original identifiers before running.
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    load_numpy,
    nightly,
    require_torch_gpu,
    slow,
    torch_device,
)

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class UpperCAmelCase_ ( a , unittest.TestCase):
    # Fast CPU test using tiny randomly-initialized components.
    lowerCamelCase__ = LDMTextToImagePipeline
    lowerCamelCase__ = TEXT_TO_IMAGE_PARAMS - {
        'negative_prompt',
        'negative_prompt_embeds',
        'cross_attention_kwargs',
        'prompt_embeds',
    }
    lowerCamelCase__ = PipelineTesterMixin.required_optional_params - {
        'num_images_per_prompt',
        'callback',
        'callback_steps',
    }
    lowerCamelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS
    lowerCamelCase__ = False

    def snake_case__ ( self):
        '''Build tiny seeded UNet/scheduler/VAE/CLIP components for the pipeline.'''
        torch.manual_seed(0)
        _lowerCAmelCase : Union[str, Any] = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        _lowerCAmelCase : Optional[int] = DDIMScheduler(
            beta_start=0.00_085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=__a, set_alpha_to_one=__a, )
        torch.manual_seed(0)
        _lowerCAmelCase : int = AutoencoderKL(
            block_out_channels=(32, 64), in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"), latent_channels=4, )
        torch.manual_seed(0)
        _lowerCAmelCase : str = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        _lowerCAmelCase : Any = CLIPTextModel(__a)
        _lowerCAmelCase : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        _lowerCAmelCase : List[Any] = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def snake_case__ ( self, __a, __a=0):
        '''Deterministic call kwargs; mps needs a global seed instead of a Generator.'''
        if str(__a).startswith("mps"):
            _lowerCAmelCase : Any = torch.manual_seed(__a)
        else:
            _lowerCAmelCase : Optional[Any] = torch.Generator(device=__a).manual_seed(__a)
        _lowerCAmelCase : Dict = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def snake_case__ ( self):
        '''Two-step dummy run: output shape and a 3x3 pixel slice must match.'''
        _lowerCAmelCase : List[str] = "cpu"  # ensure determinism for the device-dependent torch.Generator
        _lowerCAmelCase : Dict = self.get_dummy_components()
        _lowerCAmelCase : Optional[Any] = LDMTextToImagePipeline(**__a)
        pipe.to(__a)
        pipe.set_progress_bar_config(disable=__a)
        _lowerCAmelCase : str = self.get_dummy_inputs(__a)
        _lowerCAmelCase : Optional[int] = pipe(**__a).images
        _lowerCAmelCase : Dict = image[0, -3:, -3:, -1]
        assert image.shape == (1, 16, 16, 3)
        _lowerCAmelCase : Any = np.array([0.6_101, 0.6_156, 0.5_622, 0.4_895, 0.6_661, 0.3_804, 0.5_748, 0.6_136, 0.5_014])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3


@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
    # Slow GPU test against the published CompVis/ldm-text2im-large-256 checkpoint.
    def snake_case__ ( self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def snake_case__ ( self, __a, __a=torch.floataa, __a=0):
        '''Seeded latents + prompt kwargs for a reproducible 3-step run.'''
        _lowerCAmelCase : Optional[int] = torch.manual_seed(__a)
        _lowerCAmelCase : Any = np.random.RandomState(__a).standard_normal((1, 4, 32, 32))
        _lowerCAmelCase : Optional[int] = torch.from_numpy(__a).to(device=__a, dtype=__a)
        _lowerCAmelCase : Tuple = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def snake_case__ ( self):
        '''3-step run: 256x256 output and a reference pixel slice within 1e-3.'''
        _lowerCAmelCase : int = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(__a)
        pipe.set_progress_bar_config(disable=__a)
        _lowerCAmelCase : Union[str, Any] = self.get_inputs(__a)
        _lowerCAmelCase : List[Any] = pipe(**__a).images
        _lowerCAmelCase : str = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 256, 256, 3)
        _lowerCAmelCase : List[str] = np.array([0.51_825, 0.52_850, 0.52_543, 0.54_258, 0.52_304, 0.52_569, 0.54_363, 0.55_276, 0.56_878])
        _lowerCAmelCase : str = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1E-3


@nightly
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
    # Nightly GPU test: full 50-step run compared to a stored reference image.
    def snake_case__ ( self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def snake_case__ ( self, __a, __a=torch.floataa, __a=0):
        '''Seeded latents + prompt kwargs for a reproducible 50-step run.'''
        _lowerCAmelCase : Optional[Any] = torch.manual_seed(__a)
        _lowerCAmelCase : Dict = np.random.RandomState(__a).standard_normal((1, 4, 32, 32))
        _lowerCAmelCase : str = torch.from_numpy(__a).to(device=__a, dtype=__a)
        _lowerCAmelCase : Dict = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def snake_case__ ( self):
        '''50-step run must match the stored reference array within 1e-3.'''
        _lowerCAmelCase : int = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(__a)
        pipe.set_progress_bar_config(disable=__a)
        _lowerCAmelCase : Optional[Any] = self.get_inputs(__a)
        _lowerCAmelCase : List[str] = pipe(**__a).images[0]
        _lowerCAmelCase : Optional[int] = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy")
        _lowerCAmelCase : Dict = np.abs(expected_image - image).max()
        assert max_diff < 1E-3
36
from __future__ import absolute_import, division, print_function, unicode_literals from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers import RobertaConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.roberta.modeling_roberta import ( ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings, ) from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy @add_start_docstrings( 'The RoBERTa Model transformer with early exiting (DeeRoBERTa). ' , a , ) class UpperCAmelCase_ ( a): lowerCamelCase__ = RobertaConfig lowerCamelCase__ = 'roberta' def __init__( self, __a): '''simple docstring''' super().__init__(__a) _lowerCAmelCase : Optional[Any] = RobertaEmbeddings(__a) self.init_weights() @add_start_docstrings( 'RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ' , a , ) class UpperCAmelCase_ ( a): lowerCamelCase__ = RobertaConfig lowerCamelCase__ = 'roberta' def __init__( self, __a): '''simple docstring''' super().__init__(__a) _lowerCAmelCase : Optional[int] = config.num_labels _lowerCAmelCase : Optional[int] = config.num_hidden_layers _lowerCAmelCase : Optional[int] = DeeRobertaModel(__a) _lowerCAmelCase : Union[str, Any] = nn.Dropout(config.hidden_dropout_prob) _lowerCAmelCase : List[str] = nn.Linear(config.hidden_size, self.config.num_labels) @add_start_docstrings_to_model_forward(__a) def snake_case__ ( self, __a=None, __a=None, __a=None, __a=None, __a=None, __a=None, __a=None, __a=-1, __a=False, ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.num_layers try: _lowerCAmelCase : List[Any] = self.roberta( __a, attention_mask=__a, token_type_ids=__a, position_ids=__a, head_mask=__a, inputs_embeds=__a, ) _lowerCAmelCase : List[Any] = outputs[1] _lowerCAmelCase : Dict = self.dropout(__a) _lowerCAmelCase : Dict = self.classifier(__a) 
_lowerCAmelCase : Optional[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: _lowerCAmelCase : Tuple = e.message _lowerCAmelCase : Union[str, Any] = e.exit_layer _lowerCAmelCase : List[Any] = outputs[0] if not self.training: _lowerCAmelCase : int = entropy(__a) _lowerCAmelCase : List[Any] = [] _lowerCAmelCase : str = [] if labels is not None: if self.num_labels == 1: # We are doing regression _lowerCAmelCase : Optional[Any] = MSELoss() _lowerCAmelCase : int = loss_fct(logits.view(-1), labels.view(-1)) else: _lowerCAmelCase : Optional[Any] = CrossEntropyLoss() _lowerCAmelCase : Optional[Any] = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) # work with highway exits _lowerCAmelCase : Optional[int] = [] for highway_exit in outputs[-1]: _lowerCAmelCase : Any = highway_exit[0] if not self.training: highway_logits_all.append(__a) highway_entropy.append(highway_exit[2]) if self.num_labels == 1: # We are doing regression _lowerCAmelCase : List[str] = MSELoss() _lowerCAmelCase : List[Any] = loss_fct(highway_logits.view(-1), labels.view(-1)) else: _lowerCAmelCase : Dict = CrossEntropyLoss() _lowerCAmelCase : Optional[Any] = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1)) highway_losses.append(__a) if train_highway: _lowerCAmelCase : int = (sum(highway_losses[:-1]),) + outputs # exclude the final highway, of course else: _lowerCAmelCase : Any = (loss,) + outputs if not self.training: _lowerCAmelCase : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: _lowerCAmelCase : Optional[Any] = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), entropy
36
1
"""simple docstring""" import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin _A = get_tests_dir('''fixtures/test_sentencepiece.model''') if is_sentencepiece_available(): import sentencepiece as sp _A = 5 _A = 10 @require_sentencepiece @require_tokenizers class A ( snake_case_ , unittest.TestCase ): __snake_case = SpeechaTextTokenizer __snake_case = False __snake_case = True def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" super().setUp() lowerCAmelCase_ = sp.SentencePieceProcessor() spm_model.Load(UpperCamelCase__ ) lowerCAmelCase_ = ['''<s>''', '''<pad>''', '''</s>''', '''<unk>'''] vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(UpperCamelCase__ ) )] lowerCAmelCase_ = dict(zip(UpperCamelCase__, range(len(UpperCamelCase__ ) ) ) ) lowerCAmelCase_ = Path(self.tmpdirname ) save_json(UpperCamelCase__, save_dir / VOCAB_FILES_NAMES['''vocab_file'''] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(UpperCamelCase__, save_dir / VOCAB_FILES_NAMES['''spm_file'''] ) lowerCAmelCase_ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = '''<pad>''' lowerCAmelCase_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ), UpperCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ), UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = list(self.get_tokenizer().get_vocab().keys() ) 
self.assertEqual(vocab_keys[0], '''<s>''' ) self.assertEqual(vocab_keys[1], '''<pad>''' ) self.assertEqual(vocab_keys[-1], '''j''' ) self.assertEqual(len(UpperCamelCase__ ), 1001 ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size, 1001 ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) lowerCAmelCase_ = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(UpperCamelCase__, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCamelCase__ ), [289, 50, 14, 174, 386], ) lowerCAmelCase_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( UpperCamelCase__, [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''], ) lowerCAmelCase_ = tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] ) lowerCAmelCase_ = tokenizer.convert_ids_to_tokens(UpperCamelCase__ ) self.assertListEqual( UpperCamelCase__, [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''], ) @slow def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = {'''input_ids''': [[3791, 797, 31, 11, 
64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase__, model_name='''facebook/s2t-small-mustc-en-de-st''', revision='''a14f04cf0776c02f62a8cb800cf7909e15ea23ad''', ) @require_sentencepiece class A ( unittest.TestCase ): 
__snake_case = 'valhalla/s2t_mustc_multilinguial_medium' __snake_case = 'C\'est trop cool' __snake_case = 'Esto es genial' @classmethod def SCREAMING_SNAKE_CASE__ ( cls ): """simple docstring""" lowerCAmelCase_ = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name ) return cls def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" self.assertEqual(self.tokenizer.lang_code_to_id['''pt'''], 4 ) self.assertEqual(self.tokenizer.lang_code_to_id['''ru'''], 6 ) self.assertEqual(self.tokenizer.lang_code_to_id['''it'''], 9 ) self.assertEqual(self.tokenizer.lang_code_to_id['''de'''], 11 ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" self.assertEqual(self.tokenizer.vocab_size, 1_0000 ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" self.assertIn(UpperCamelCase__, self.tokenizer.all_special_ids ) lowerCAmelCase_ = [ES_CODE, 4, 1601, 47, 7647, 2] lowerCAmelCase_ = self.tokenizer.decode(UpperCamelCase__, skip_special_tokens=UpperCamelCase__ ) lowerCAmelCase_ = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=UpperCamelCase__ ) self.assertEqual(UpperCamelCase__, UpperCamelCase__ ) self.assertNotIn(self.tokenizer.eos_token, UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = '''fr''' lowerCAmelCase_ = self.tokenizer(self.french_text ).input_ids self.assertEqual(encoded[0], UpperCamelCase__ ) self.assertEqual(encoded[-1], self.tokenizer.eos_token_id ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = '''fr''' self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE] ) lowerCAmelCase_ = '''es''' self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE] )
353
import string


def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher by printing the decryption for all 26 keys.

    Args:
        message: Ciphertext; only uppercase ASCII letters are shifted, every
            other character is passed through unchanged.
    """
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    # Wrap around the start of the alphabet.
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                # Spaces, digits and punctuation are left untouched.
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    """Read a message from stdin, uppercase it, and show all candidate decryptions."""
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
167
0
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Return True if giving a vertex ``color`` clashes with no colored neighbour.

    Args:
        neighbours: Adjacency row of the vertex (1 = edge, 0 = no edge).
        colored_vertices: Current color per vertex (-1 means uncolored).
        color: Candidate color.
    """
    # Invalid iff some adjacent vertex already carries this color.
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(
    graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int
) -> bool:
    """Backtracking helper: color vertices ``index`` onwards.

    Fills ``colored_vertices`` in place and returns True when a valid
    coloring using at most ``max_colors`` colors exists.
    """
    # Base case: every vertex received a color.
    if index == len(graph):
        return True

    # Recursive step: try each color, recurse, backtrack on failure.
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            colored_vertices[index] = i
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack: undo the tentative assignment.
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Solve the graph m-coloring problem by backtracking.

    Args:
        graph: Adjacency matrix; graph[i][j] == 1 iff vertices i and j touch.
        max_colors: Number of available colors.

    Returns:
        A color (0 .. max_colors-1) per vertex, or [] if no coloring exists.
    """
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
277
from math import factorial


def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return P(X == successes) for a Binomial(trials, prob) random variable.

    Args:
        successes: Number of successful outcomes (non-negative int).
        trials: Total number of independent trials (non-negative int).
        prob: Success probability of a single trial, strictly in (0, 1).

    Raises:
        ValueError: If the arguments are out of range or not integers.
    """
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / (k! * (n - k)!)
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trails")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
277
1
import argparse
import collections
import os
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."


def _find_text_in_file(filename, start_prompt, end_prompt):
    """Return (text, start_index, end_index, lines) for the region of
    ``filename`` strictly between the lines starting with ``start_prompt``
    and ``end_prompt`` (blank boundary lines trimmed)."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    # Trim blank lines at both boundaries.
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# Add here suffixes that are used to identify models, separated by |
ALLOWED_MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)


def camel_case_split(identifier):
    """Split a CamelCase name into its words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def _center_text(text, width):
    """Center ``text`` within ``width`` columns (✅/❌ count as 2 columns)."""
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent


def get_model_table_from_auto_modules():
    """Build the markdown support table (slow/fast tokenizer, PT/TF/Flax)
    for every model registered in the auto mappings."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name.
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    # The first column holds the (longer) model names.
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table


def check_model_table(overwrite=False):
    """Compare the auto-generated model table against docs/index.md; rewrite
    it when ``overwrite`` is True, otherwise raise on mismatch."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    check_model_table(args.fix_and_overwrite)
363
def solution(length: int = 50) -> int:
    """Project Euler 114: count the arrangements of a row of ``length`` units
    filled with red blocks of minimum length 3, any two blocks separated by at
    least one black square (the empty arrangement counts).

    >>> solution(7)
    17
    """
    # ways_number[i] = number of arrangements for a row of length i.
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            # Every start position that leaves room for a separating black
            # square after the block; the remainder is a smaller subproblem.
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            # Plus the placement where the block ends flush with the row end.
            ways_number[row_length] += 1

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
231
0
"""Project Euler problem 123: https://projecteuler.net/problem=123

For odd n, the remainder of (p_n - 1)^n + (p_n + 1)^n divided by p_n^2 is
2 * n * p_n, where p_n is the n-th prime.
"""
from __future__ import annotations

from collections.abc import Generator


def sieve() -> Generator[int, None, None]:
    """Yield primes indefinitely with an incremental (lazy) sieve.

    ``factor_map`` maps each upcoming composite to one of its prime factors,
    so testing ``prime`` costs a single dict lookup.
    """
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # Composite: reschedule this factor at its next unclaimed multiple.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # Prime: its first multiple of interest is prime squared.
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    """Return the least odd n such that the remainder 2 * n * p_n first
    exceeds ``limit``."""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
25
"""Compute ugly numbers: positive integers whose only prime factors are 2, 3 or 5."""


def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number, with ugly_numbers(1) == 1.

    Uses the classic three-pointer merge of the 2x, 3x and 5x subsequences.
    """
    ugly_nums = [1]

    # Indices of the ugly number whose next 2-, 3- and 5-multiple is pending.
    ia, ib, ic = 0, 0, 0
    next_a = ugly_nums[ia] * 2
    next_b = ugly_nums[ib] * 3
    next_c = ugly_nums[ic] * 5

    for _ in range(1, n):
        next_num = min(next_a, next_b, next_c)
        ugly_nums.append(next_num)
        # Advance every pointer that produced next_num so duplicates such as
        # 6 (= 2*3 = 3*2) are emitted exactly once.
        if next_num == next_a:
            ia += 1
            next_a = ugly_nums[ia] * 2
        if next_num == next_b:
            ib += 1
            next_b = ugly_nums[ib] * 3
        if next_num == next_c:
            ic += 1
            next_c = ugly_nums[ic] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f"{ugly_numbers(200) = }")
25
1
"""Morphological dilation of a binary image (demo uses image_data/lena.jpg)."""
from pathlib import Path

import numpy as np


def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Convert an (H, W, 3) RGB image to grayscale using ITU-R 601 weights."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Threshold a grayscale image to a boolean mask (True for values in (127, 255])."""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Dilate binary ``image`` with structuring element ``kernel``.

    A pixel becomes 1 when any kernel position overlaps a set pixel of the
    (zero-padded) image; output has the same shape and dtype as ``image``.
    """
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )

    # Copy image into the center of the padded canvas.
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image

    # Iterate over image & apply kernel.
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output


if __name__ == "__main__":
    # Pillow is only needed for this demo; import lazily so the pure-numpy
    # functions above remain usable without it.
    from PIL import Image

    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))

    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)

    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
190
"""Pollard's rho integer factorisation."""
from __future__ import annotations

from math import gcd


def pollard_rho(
    num: int,
    seed: int = 2,
    step: int = 1,
    attempts: int = 3,
) -> int | None:
    """Return a nontrivial divisor of ``num``, or None if none was found.

    Args:
        num: Integer to factor; must be at least 2.
        seed: Starting value for the tortoise/hare iteration.
        step: Additive constant of the pseudorandom map f(x) = (x^2 + step) % num.
        attempts: Number of (seed, step) combinations tried before giving up.

    Raises:
        ValueError: If ``num`` is less than 2.
    """
    # A value less than 2 can cause an infinite loop in the algorithm.
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard suggested f(x) = (x**2 - 1) % num; using (x**2 + step) % num
    # instead lets each retry perturb the map deterministically.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # The tortoise moves one step per iteration, the hare moves two.
        tortoise = seed
        hare = seed

        while True:
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # Both walkers eventually enter a cycle whose length p divides
            # ``num``; coinciding modulo p shows up as a common divisor.
            divisor = gcd(hare - tortoise, num)
            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            if divisor == num:
                # Trivial divisor (num itself) — restart with new parameters.
                break
            # We found a nontrivial factor of ``num``!
            return divisor

        # Deterministic retry: reuse the hare's position as the next seed
        # (as Brent's optimized variant does) and perturb the random map.
        seed = hare
        step += 1

    # We were unlucky, or ``num`` itself is actually prime.
    return None


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "num",
        type=int,
        help="The value to find a divisor of",
    )
    parser.add_argument(
        "--attempts",
        type=int,
        default=3,
        help="The number of attempts before giving up",
    )
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"{args.num} is probably prime")
    else:
        quotient = args.num // divisor
        print(f"{args.num} = {divisor} * {quotient}")
190
1
from typing import List, Optional, Union

import numpy as np
import PIL
import torch
from PIL import Image

from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
        >>> from diffusers.utils import load_image
        >>> import torch

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        ... )
        >>> pipe_prior.to("cuda")

        >>> prompt = "A red cartoon frog, 4k"
        >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)

        >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        ... )
        >>> pipe.to("cuda")

        >>> init_image = load_image(
        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        ...     "/kandinsky/frog.png"
        ... )

        >>> image = pipe(
        ...     image=init_image,
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=100,
        ...     strength=0.2,
        ... ).images

        >>> image[0].save("red_frog.png")
        ```
"""


def downscale_height_and_width(height, width, scale_factor=8):
    """Round ``height``/``width`` up to the nearest multiple of ``scale_factor**2``,
    returned in latent units multiplied back by ``scale_factor``."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


def prepare_image(pil_image, w=512, h=512):
    """Resize a PIL image to (w, h) and convert it to a float tensor in [-1, 1]
    with shape (1, 3, h, w)."""
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1  # map [0, 255] -> [-1, 1]
    arr = np.transpose(arr, [2, 0, 1])  # HWC -> CHW
    image = torch.from_numpy(arr).unsqueeze(0)
    return image


class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """
    Image-to-image pipeline for Kandinsky 2.2.

    Args:
        unet: Conditional U-Net that denoises the image latents.
        scheduler: DDPM scheduler used together with ``unet``.
        movq: MoVQ model used to encode/decode images to/from latents.
    """

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        # Downsampling factor of the MoVQ encoder (one factor of 2 per block level).
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        """Return the tail of the timestep schedule used for img2img: the
        original schedule offset by ``strength``."""
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        """Encode ``image`` to MoVQ latents (unless it already has 4 channels)
        and noise them to ``timestep``."""
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            # Input is already a latent tensor; skip the encoder.
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # Noise the clean latents to the starting timestep of the img2img schedule.
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)

        latents = init_latents
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offload all models to CPU via accelerate, moving them to GPU only
        while their ``forward`` runs. Slow but memory-frugal."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        """Offload whole models to CPU, moving one model at a time to GPU.
        Faster than sequential offload at a modest memory cost."""
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """Device on which the pipeline actually executes, accounting for
        accelerate hooks installed by CPU offloading."""
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Run img2img generation conditioned on prior image embeddings.

        Returns an `ImagePipelineOutput` (or a plain tuple when
        ``return_dict=False``) holding the generated images.
        """
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}."
                " Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # Expand the latents if we are doing classifier free guidance.
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # Compute the previous noisy sample x_t -> x_t-1.
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # Post-processing: decode latents back to image space.
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5  # [-1, 1] -> [0, 1]
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
317
import multiprocessing
import os
from typing import BinaryIO, Optional, Union

import fsspec

from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class JsonDatasetReader(AbstractDatasetReader):
    """Read one or more JSON/JSONL files into a `Dataset` (or streaming dataset)."""

    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # `field` selects a nested field inside each JSON document.
        self.field = field
        # Normalize to a {split: paths} mapping expected by the builder.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        """Materialize the dataset (streaming or map-style depending on `self.streaming`)."""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class JsonDatasetWriter:
    """Write a `Dataset` to a JSON-lines file (or any binary file object)."""

    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        """Serialize the dataset; returns the number of bytes written."""
        # `path_or_buf` is handled by this class, never forwarded to pandas.
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        # JSON-lines only makes sense for the "records" orientation.
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args) -> bytes:
        """Encode one batch of rows to JSON bytes. `args` is a 5-tuple so it can
        be shipped through `multiprocessing.Pool.imap`."""
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(
        self,
        file_obj: BinaryIO,
        orient,
        lines,
        index,
        **to_json_kwargs,
    ) -> int:
        """Write the dataset to `file_obj`, batch by batch (optionally in parallel)."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
317
1
"""simple docstring""" import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = ["image_processor", "tokenizer"] __UpperCamelCase = "OwlViTImageProcessor" __UpperCamelCase = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__( self , _a=None , _a=None , **_a ): """simple docstring""" lowerCamelCase = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , _a , ) lowerCamelCase = kwargs.pop("""feature_extractor""" ) lowerCamelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(_a , _a ) def __call__( self , _a=None , _a=None , _a=None , _a="max_length" , _a="np" , **_a ): """simple docstring""" if text is None and query_images is None and images is None: raise ValueError( """You have to specify at least one text or query image or image. 
All three cannot be none.""" ) if text is not None: if isinstance(_a , _a ) or (isinstance(_a , _a ) and not isinstance(text[0] , _a )): lowerCamelCase = [self.tokenizer(_a , padding=_a , return_tensors=_a , **_a )] elif isinstance(_a , _a ) and isinstance(text[0] , _a ): lowerCamelCase = [] # Maximum number of queries across batch lowerCamelCase = max([len(_a ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(_a ) != max_num_queries: lowerCamelCase = t + [""" """] * (max_num_queries - len(_a )) lowerCamelCase = self.tokenizer(_a , padding=_a , return_tensors=_a , **_a ) encodings.append(_a ) else: raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" ) if return_tensors == "np": lowerCamelCase = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 ) lowerCamelCase = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp lowerCamelCase = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 ) lowerCamelCase = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch lowerCamelCase = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 ) lowerCamelCase = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf lowerCamelCase = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 ) lowerCamelCase = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 ) else: raise ValueError("""Target return tensor type could not be returned""" ) lowerCamelCase = BatchEncoding() lowerCamelCase = input_ids lowerCamelCase = attention_mask if query_images is not None: lowerCamelCase = 
BatchEncoding() lowerCamelCase = self.image_processor( _a , return_tensors=_a , **_a ).pixel_values lowerCamelCase = query_pixel_values if images is not None: lowerCamelCase = self.image_processor(_a , return_tensors=_a , **_a ) if text is not None and images is not None: lowerCamelCase = image_features.pixel_values return encoding elif query_images is not None and images is not None: lowerCamelCase = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**_a ) , tensor_type=_a ) def _lowerCAmelCase ( self , *_a , **_a ): """simple docstring""" return self.image_processor.post_process(*_a , **_a ) def _lowerCAmelCase ( self , *_a , **_a ): """simple docstring""" return self.image_processor.post_process_object_detection(*_a , **_a ) def _lowerCAmelCase ( self , *_a , **_a ): """simple docstring""" return self.image_processor.post_process_image_guided_detection(*_a , **_a ) def _lowerCAmelCase ( self , *_a , **_a ): """simple docstring""" return self.tokenizer.batch_decode(*_a , **_a ) def _lowerCAmelCase ( self , *_a , **_a ): """simple docstring""" return self.tokenizer.decode(*_a , **_a ) @property def _lowerCAmelCase ( self ): """simple docstring""" warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _a , ) return self.image_processor_class @property def _lowerCAmelCase ( self ): """simple docstring""" warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , _a , ) return self.image_processor
168
"""simple docstring""" from __future__ import annotations class __magic_name__ : '''simple docstring''' def __init__( self , _a ): """simple docstring""" lowerCamelCase = TypeError( """Matrices must be formed from a list of zero or more lists containing at """ """least one and the same number of values, each of which must be of type """ """int or float.""" ) if len(_a ) != 0: lowerCamelCase = len(rows[0] ) if cols == 0: raise error for row in rows: if len(_a ) != cols: raise error for value in row: if not isinstance(_a , (int, float) ): raise error lowerCamelCase = rows else: lowerCamelCase = [] def _lowerCAmelCase ( self ): """simple docstring""" return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )] @property def _lowerCAmelCase ( self ): """simple docstring""" return len(self.rows ) @property def _lowerCAmelCase ( self ): """simple docstring""" return len(self.rows[0] ) @property def _lowerCAmelCase ( self ): """simple docstring""" return (self.num_rows, self.num_columns) @property def _lowerCAmelCase ( self ): """simple docstring""" return self.order[0] == self.order[1] def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = [ [0 if column_num != row_num else 1 for column_num in range(self.num_rows )] for row_num in range(self.num_rows ) ] return Matrix(_a ) def _lowerCAmelCase ( self ): """simple docstring""" if not self.is_square: return 0 if self.order == (0, 0): return 1 if self.order == (1, 1): return int(self.rows[0][0] ) if self.order == (2, 2): return int( (self.rows[0][0] * self.rows[1][1]) - (self.rows[0][1] * self.rows[1][0]) ) else: return sum( self.rows[0][column] * self.cofactors().rows[0][column] for column in range(self.num_columns ) ) def _lowerCAmelCase ( self ): """simple docstring""" return bool(self.determinant() ) def _lowerCAmelCase ( self , _a , _a ): """simple docstring""" lowerCamelCase = [ [ self.rows[other_row][other_column] for other_column in range(self.num_columns ) if other_column != column ] for 
other_row in range(self.num_rows ) if other_row != row ] return Matrix(_a ).determinant() def _lowerCAmelCase ( self , _a , _a ): """simple docstring""" if (row + column) % 2 == 0: return self.get_minor(_a , _a ) return -1 * self.get_minor(_a , _a ) def _lowerCAmelCase ( self ): """simple docstring""" return Matrix( [ [self.get_minor(_a , _a ) for column in range(self.num_columns )] for row in range(self.num_rows ) ] ) def _lowerCAmelCase ( self ): """simple docstring""" return Matrix( [ [ self.minors().rows[row][column] if (row + column) % 2 == 0 else self.minors().rows[row][column] * -1 for column in range(self.minors().num_columns ) ] for row in range(self.minors().num_rows ) ] ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = [ [self.cofactors().rows[column][row] for column in range(self.num_columns )] for row in range(self.num_rows ) ] return Matrix(_a ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.determinant() if not determinant: raise TypeError("""Only matrices with a non-zero determinant have an inverse""" ) return self.adjugate() * (1 / determinant) def __repr__( self ): """simple docstring""" return str(self.rows ) def __str__( self ): """simple docstring""" if self.num_rows == 0: return "[]" if self.num_rows == 1: return "[[" + ". ".join(str(self.rows[0] ) ) + "]]" return ( "[" + "\n ".join( [ """[""" + """. 
""".join([str(_a ) for value in row] ) + """.]""" for row in self.rows ] ) + "]" ) def _lowerCAmelCase ( self , _a , _a = None ): """simple docstring""" lowerCamelCase = TypeError("""Row must be a list containing all ints and/or floats""" ) if not isinstance(_a , _a ): raise type_error for value in row: if not isinstance(_a , (int, float) ): raise type_error if len(_a ) != self.num_columns: raise ValueError( """Row must be equal in length to the other rows in the matrix""" ) if position is None: self.rows.append(_a ) else: lowerCamelCase = self.rows[0:position] + [row] + self.rows[position:] def _lowerCAmelCase ( self , _a , _a = None ): """simple docstring""" lowerCamelCase = TypeError( """Column must be a list containing all ints and/or floats""" ) if not isinstance(_a , _a ): raise type_error for value in column: if not isinstance(_a , (int, float) ): raise type_error if len(_a ) != self.num_rows: raise ValueError( """Column must be equal in length to the other columns in the matrix""" ) if position is None: lowerCamelCase = [self.rows[i] + [column[i]] for i in range(self.num_rows )] else: lowerCamelCase = [ self.rows[i][0:position] + [column[i]] + self.rows[i][position:] for i in range(self.num_rows ) ] def __eq__( self , _a ): """simple docstring""" if not isinstance(_a , _a ): return NotImplemented return self.rows == other.rows def __ne__( self , _a ): """simple docstring""" return not self == other def __neg__( self ): """simple docstring""" return self * -1 def __add__( self , _a ): """simple docstring""" if self.order != other.order: raise ValueError("""Addition requires matrices of the same order""" ) return Matrix( [ [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __sub__( self , _a ): """simple docstring""" if self.order != other.order: raise ValueError("""Subtraction requires matrices of the same order""" ) return Matrix( [ [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns 
)] for i in range(self.num_rows ) ] ) def __mul__( self , _a ): """simple docstring""" if isinstance(_a , (int, float) ): return Matrix( [[int(element * other ) for element in row] for row in self.rows] ) elif isinstance(_a , _a ): if self.num_columns != other.num_rows: raise ValueError( """The number of columns in the first matrix must """ """be equal to the number of rows in the second""" ) return Matrix( [ [Matrix.dot_product(_a , _a ) for column in other.columns()] for row in self.rows ] ) else: raise TypeError( """A Matrix can only be multiplied by an int, float, or another matrix""" ) def __pow__( self , _a ): """simple docstring""" if not isinstance(_a , _a ): raise TypeError("""A Matrix can only be raised to the power of an int""" ) if not self.is_square: raise ValueError("""Only square matrices can be raised to a power""" ) if other == 0: return self.identity() if other < 0: if self.is_invertable(): return self.inverse() ** (-other) raise ValueError( """Only invertable matrices can be raised to a negative power""" ) lowerCamelCase = self for _ in range(other - 1 ): result *= self return result @classmethod def _lowerCAmelCase ( cls , _a , _a ): """simple docstring""" return sum(row[i] * column[i] for i in range(len(_a ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
168
1
"""Non-preemptive shortest-job-first scheduling: compute per-process waiting
and turnaround times."""
from __future__ import annotations

from statistics import mean


def calculate_waitingtime(
    arrival_time: list[int], burst_time: list[int], no_of_processes: int
) -> list[int]:
    """
    Return the waiting time of each process.

    At every decision point the scheduler picks, among the processes that have
    already arrived and are not finished, the one with the shortest burst time
    and runs it to completion.
    """
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to the full burst of each process.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    completed = 0
    total_time = 0

    # While processes are not completed: a process whose arrival time has
    # passed and that still has remaining execution time enters ready_process;
    # the shortest one (target_process) is executed to completion.
    while completed != no_of_processes:
        ready_process: list[int] = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            # No process has arrived yet: let one unit of idle time pass.
            total_time += 1

    return waiting_time


def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    """Turnaround time of each process = burst time + waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


if __name__ == "__main__":
    print("[TEST CASE 01]")

    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )

    # Printing the Result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
            f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
        )
    print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
    print(f"Average turnaround time = {mean(turn_around_time):.5f}")
55
'''simple docstring''' def __lowercase ( __lowercase = 100 ) -> int: '''simple docstring''' _A = n * (n + 1) * (2 * n + 1) / 6 _A = (n * (n + 1) / 2) ** 2 return int(square_of_sum - sum_of_squares ) if __name__ == "__main__": print(F"""{solution() = }""")
79
0
import inspect
from typing import Callable, List, Optional, Union

import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging

# Module-level logger (name mangled from `logger` by obfuscation).
A : str = logging.get_logger(__name__)  # pylint: disable=invalid-name


# NOTE(review): this class has been damaged by identifier obfuscation and does
# not parse as-is: every parameter is named `_UpperCAmelCase` (duplicate
# argument names are a SyntaxError), every assignment targets `lowercase__`,
# and later reads still use the original descriptive names (`text_inputs`,
# `batch_size`, `latents`, ...). The code is kept byte-identical below; the
# original names must be restored before this pipeline can run.
class A ( UpperCAmelCase__ ):
    """Stable-Diffusion-style text-to-image pipeline with "seed resizing":
    when a reference latent tensor is supplied, its center crop/pad is spliced
    into the freshly sampled latents so images generated from the same seed at
    different resolutions stay visually similar."""

    def __init__(self : List[str] , _UpperCAmelCase : AutoencoderKL , _UpperCAmelCase : CLIPTextModel , _UpperCAmelCase : CLIPTokenizer , _UpperCAmelCase : UNetaDConditionModel , _UpperCAmelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , _UpperCAmelCase : StableDiffusionSafetyChecker , _UpperCAmelCase : CLIPImageProcessor , ) -> Dict:
        """Register the VAE, text encoder, tokenizer, UNet, scheduler, safety
        checker and feature extractor as pipeline modules."""
        super().__init__()
        self.register_modules(
            vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , )

    # NOTE(review): this method was `enable_attention_slicing`; the computed
    # slice size is assigned to `lowercase__` but never passed on — the call
    # below forwards the (undefined) `_UpperCAmelCase` instead. Obfuscation
    # artifact; originally `self.unet.set_attention_slice(slice_size)`.
    def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : Optional[Union[str, int]] = "auto" ) -> Tuple:
        """Enable sliced attention computation to reduce peak memory."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            lowercase__ = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(_UpperCAmelCase )

    # NOTE(review): was `disable_attention_slicing` (it calls
    # enable_attention_slicing(None)); the obfuscated name collides with the
    # method above, so as written this definition shadows it.
    def lowerCamelCase__ (self : Optional[Any] ) -> Optional[Any]:
        """Disable attention slicing (slice size None = compute in one step)."""
        self.enable_attention_slicing(_UpperCAmelCase )

    @torch.no_grad()
    def __call__(self : Any , _UpperCAmelCase : Union[str, List[str]] , _UpperCAmelCase : int = 512 , _UpperCAmelCase : int = 512 , _UpperCAmelCase : int = 50 , _UpperCAmelCase : float = 7.5 , _UpperCAmelCase : Optional[Union[str, List[str]]] = None , _UpperCAmelCase : Optional[int] = 1 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : Optional[torch.Generator] = None , _UpperCAmelCase : Optional[torch.FloatTensor] = None , _UpperCAmelCase : Optional[str] = "pil" , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _UpperCAmelCase : int = 1 , _UpperCAmelCase : Optional[torch.FloatTensor] = None , **_UpperCAmelCase : Any , ) -> Tuple:
        """Run the full text-to-image generation loop.

        Positional order of the mangled parameters (from the defaults and the
        body reads): prompt, height=512, width=512, num_inference_steps=50,
        guidance_scale=7.5, negative_prompt, num_images_per_prompt=1, eta=0.0,
        generator, latents_reference, output_type="pil", return_dict=True,
        callback, callback_steps=1, text_embeddings, **kwargs.

        Returns a StableDiffusionPipelineOutput (or a tuple when
        return_dict=False) with the decoded images and NSFW flags.
        """
        # --- validate prompt / sizes / callback_steps -----------------------
        if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
            lowercase__ = 1
        elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
            lowercase__ = len(_UpperCAmelCase )
        else:
            raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(_UpperCAmelCase )}''' )

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or callback_steps <= 0)
        ):
            raise ValueError(
                f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                f''' {type(_UpperCAmelCase )}.''' )

        # get prompt text embeddings
        lowercase__ = self.tokenizer(
            _UpperCAmelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
        lowercase__ = text_inputs.input_ids

        # Warn when the prompt is longer than CLIP's context window and crop.
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            lowercase__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                """The following part of your input was truncated because CLIP can only handle sequences up to"""
                f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
            lowercase__ = text_input_ids[:, : self.tokenizer.model_max_length]

        # Precomputed embeddings may be supplied; only encode when absent.
        if text_embeddings is None:
            lowercase__ = self.text_encoder(text_input_ids.to(self.device ) )[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        lowercase__ , lowercase__ , lowercase__ = text_embeddings.shape
        lowercase__ = text_embeddings.repeat(1 , _UpperCAmelCase , 1 )
        lowercase__ = text_embeddings.view(bs_embed * num_images_per_prompt , _UpperCAmelCase , -1 )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        lowercase__ = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            lowercase__ = 42
            if negative_prompt is None:
                lowercase__ = [""""""]
            elif type(_UpperCAmelCase ) is not type(_UpperCAmelCase ):
                raise TypeError(
                    f'''`negative_prompt` should be the same type to `prompt`, but got {type(_UpperCAmelCase )} !='''
                    f''' {type(_UpperCAmelCase )}.''' )
            elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
                lowercase__ = [negative_prompt]
            elif batch_size != len(_UpperCAmelCase ):
                raise ValueError(
                    f'''`negative_prompt`: {negative_prompt} has batch size {len(_UpperCAmelCase )}, but `prompt`:'''
                    f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
                    """ the batch size of `prompt`.""" )
            else:
                lowercase__ = negative_prompt

            lowercase__ = text_input_ids.shape[-1]
            lowercase__ = self.tokenizer(
                _UpperCAmelCase , padding="""max_length""" , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors="""pt""" , )
            lowercase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            lowercase__ = uncond_embeddings.shape[1]
            lowercase__ = uncond_embeddings.repeat(_UpperCAmelCase , _UpperCAmelCase , 1 )
            lowercase__ = uncond_embeddings.view(batch_size * num_images_per_prompt , _UpperCAmelCase , -1 )

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            lowercase__ = torch.cat([uncond_embeddings, text_embeddings] )

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        lowercase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        # Reference latents are always sampled at the base 512x512 (64x64 latent) size.
        lowercase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        lowercase__ = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                lowercase__ = torch.randn(
                    _UpperCAmelCase , generator=_UpperCAmelCase , device="""cpu""" , dtype=_UpperCAmelCase ).to(self.device )
                lowercase__ = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device="""cpu""" , dtype=_UpperCAmelCase ).to(
                    self.device )
            else:
                lowercase__ = torch.randn(
                    _UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=_UpperCAmelCase )
                lowercase__ = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=_UpperCAmelCase )
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            lowercase__ = latents_reference.to(self.device )
            lowercase__ = latents.to(self.device )

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        lowercase__ = (latents_shape[3] - latents_shape_reference[3]) // 2
        lowercase__ = (latents_shape[2] - latents_shape_reference[2]) // 2
        lowercase__ = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        lowercase__ = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        lowercase__ = 0 if dx < 0 else dx
        lowercase__ = 0 if dy < 0 else dy
        lowercase__ = max(-dx , 0 )
        lowercase__ = max(-dy , 0 )
        # import pdb
        # pdb.set_trace()
        # Splice the reference crop into the freshly sampled latents.
        lowercase__ = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(_UpperCAmelCase )

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        lowercase__ = self.scheduler.timesteps.to(self.device )

        # scale the initial noise by the standard deviation required by the scheduler
        lowercase__ = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        lowercase__ = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        lowercase__ = {}
        if accepts_eta:
            lowercase__ = eta

        # --- denoising loop -------------------------------------------------
        for i, t in enumerate(self.progress_bar(_UpperCAmelCase ) ):
            # expand the latents if we are doing classifier free guidance
            lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            lowercase__ = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )

            # predict the noise residual
            lowercase__ = self.unet(_UpperCAmelCase , _UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase ).sample

            # perform guidance
            if do_classifier_free_guidance:
                lowercase__ , lowercase__ = noise_pred.chunk(2 )
                lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            lowercase__ = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )

        # --- decode latents to images --------------------------------------
        # 0.18215 is the SD VAE scaling factor; undo it before decoding.
        lowercase__ = 1 / 0.18_215 * latents
        lowercase__ = self.vae.decode(_UpperCAmelCase ).sample

        lowercase__ = (image / 2 + 0.5).clamp(0 , 1 )

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()

        # --- optional safety check and output formatting -------------------
        if self.safety_checker is not None:
            lowercase__ = self.feature_extractor(self.numpy_to_pil(_UpperCAmelCase ) , return_tensors="""pt""" ).to(
                self.device )
            lowercase__ , lowercase__ = self.safety_checker(
                images=_UpperCAmelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
        else:
            lowercase__ = None

        if output_type == "pil":
            lowercase__ = self.numpy_to_pil(_UpperCAmelCase )

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=_UpperCAmelCase , nsfw_content_detected=_UpperCAmelCase )
368
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


# NOTE(review): identifier obfuscation damaged this class: every parameter is
# `_UpperCAmelCase` (duplicate argument names are a SyntaxError), assignments
# target `lowercase__`, and later reads use the original descriptive names
# (`batch_size`, `shape`, `sample`, ...). Kept byte-identical; the original
# names must be restored before the pipeline can run.
class A ( UpperCAmelCase__ ):
    """Unconditional image generation with the score-based SDE-VE sampler
    (predictor-corrector loop over a UNet score model)."""

    # Class-level annotations mangled to `A__ = 42`; presumably the original
    # declared the `unet` / `scheduler` attribute types — TODO confirm.
    A__ = 42
    A__ = 42

    def __init__(self : Union[str, Any] , _UpperCAmelCase : UNetaDModel , _UpperCAmelCase : ScoreSdeVeScheduler ) -> List[str]:
        """Register the UNet score model and the SDE-VE scheduler."""
        super().__init__()
        self.register_modules(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase )

    @torch.no_grad()
    def __call__(self : Optional[Any] , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 2000 , _UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _UpperCAmelCase : Optional[str] = "pil" , _UpperCAmelCase : bool = True , **_UpperCAmelCase : Any , ) -> Union[ImagePipelineOutput, Tuple]:
        """Sample images.

        Positional order of the mangled parameters (from the defaults):
        batch_size=1, num_inference_steps=2000, generator, output_type="pil",
        return_dict=True. Returns an ImagePipelineOutput, or a 1-tuple when
        return_dict=False.
        """
        lowercase__ = self.unet.config.sample_size
        # Sample shape: (batch, 3, sample_size, sample_size).
        lowercase__ = (batch_size, 3, img_size, img_size)

        lowercase__ = self.unet

        # Draw the initial sample and scale to the scheduler's starting sigma.
        lowercase__ = randn_tensor(_UpperCAmelCase , generator=_UpperCAmelCase ) * self.scheduler.init_noise_sigma
        lowercase__ = sample.to(self.device )

        self.scheduler.set_timesteps(_UpperCAmelCase )
        self.scheduler.set_sigmas(_UpperCAmelCase )

        # Predictor-corrector sampling loop.
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            lowercase__ = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )

            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                lowercase__ = self.unet(_UpperCAmelCase , _UpperCAmelCase ).sample
                lowercase__ = self.scheduler.step_correct(_UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample

            # prediction step
            lowercase__ = model(_UpperCAmelCase , _UpperCAmelCase ).sample
            lowercase__ = self.scheduler.step_pred(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase )

            lowercase__ , lowercase__ = output.prev_sample, output.prev_sample_mean

        # Use the denoised mean for the final image, clamped to [0, 1].
        lowercase__ = sample_mean.clamp(0 , 1 )
        lowercase__ = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            lowercase__ = self.numpy_to_pil(_UpperCAmelCase )

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=_UpperCAmelCase )
146
0
"""Convert original Intel DPT (Dense Prediction Transformer) checkpoints to
the Hugging Face Transformers format.

NOTE(review): identifier obfuscation damaged this script: every function is
defined as ``lowercase_`` (each definition shadowing the previous one) while
call sites use the original names (``get_dpt_config``, ``remove_ignore_keys_``,
``read_in_q_k_v``, ``prepare_img``, ``convert_dpt_checkpoint``); parameters
are all ``_snake_case`` and assignment targets were collapsed to
``SCREAMING_SNAKE_CASE__`` — in particular ``read_in_q_k_v`` appears to have
lost its ``state_dict[...] = ...`` targets. Code is kept byte-identical; the
original names must be restored before this script can run.
"""
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image

from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
# Module logger (name mangled by obfuscation).
UpperCAmelCase__ : List[Any] = logging.get_logger(__name__)


# Originally `get_dpt_config(checkpoint_url)`: builds a DPTConfig (ViT-Large
# hyper-parameters for "large" checkpoints, ADE20k head for "ade" ones) and
# returns (config, expected_output_shape).
def lowercase_ ( _snake_case ):
    SCREAMING_SNAKE_CASE__ : Optional[Any] = DPTConfig()

    if "large" in checkpoint_url:
        # ViT-Large backbone hyper-parameters.
        SCREAMING_SNAKE_CASE__ : Any = 1_024
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = 4_096
        SCREAMING_SNAKE_CASE__ : Tuple = 24
        SCREAMING_SNAKE_CASE__ : Any = 16
        SCREAMING_SNAKE_CASE__ : Any = [5, 11, 17, 23]
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = [256, 512, 1_024, 1_024]
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = (1, 384, 384)

    if "ade" in checkpoint_url:
        # Semantic-segmentation variant trained on ADE20k (150 classes);
        # label mapping is fetched from the hub.
        SCREAMING_SNAKE_CASE__ : Optional[int] = True
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = 150
        SCREAMING_SNAKE_CASE__ : int = """huggingface/label-files"""
        SCREAMING_SNAKE_CASE__ : int = """ade20k-id2label.json"""
        SCREAMING_SNAKE_CASE__ : Tuple = json.load(open(cached_download(hf_hub_url(_snake_case ,_snake_case ,repo_type="""dataset""" ) ) ,"""r""" ) )
        SCREAMING_SNAKE_CASE__ : List[Any] = {int(_snake_case ): v for k, v in idalabel.items()}
        SCREAMING_SNAKE_CASE__ : Optional[int] = idalabel
        SCREAMING_SNAKE_CASE__ : List[Any] = {v: k for k, v in idalabel.items()}
        SCREAMING_SNAKE_CASE__ : Optional[Any] = [1, 150, 480, 480]

    return config, expected_shape


# Originally `remove_ignore_keys_(state_dict)`: drops the timm classifier
# head weights, which have no counterpart in the DPT model.
def lowercase_ ( _snake_case ):
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
    for k in ignore_keys:
        state_dict.pop(_snake_case ,_snake_case )


# Originally `rename_key(name)`: maps one original checkpoint key to the
# Transformers naming scheme via a sequence of conditional substring replaces
# (order matters: e.g. `attn.proj` must be handled before the generic `proj`).
def lowercase_ ( _snake_case ):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        SCREAMING_SNAKE_CASE__ : List[Any] = name.replace("""pretrained.model""" ,"""dpt.encoder""" )
    if "pretrained.model" in name:
        SCREAMING_SNAKE_CASE__ : List[Any] = name.replace("""pretrained.model""" ,"""dpt.embeddings""" )
    if "patch_embed" in name:
        SCREAMING_SNAKE_CASE__ : Optional[int] = name.replace("""patch_embed""" ,"""patch_embeddings""" )
    if "pos_embed" in name:
        SCREAMING_SNAKE_CASE__ : Optional[Any] = name.replace("""pos_embed""" ,"""position_embeddings""" )
    if "attn.proj" in name:
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = name.replace("""attn.proj""" ,"""attention.output.dense""" )
    if "proj" in name and "project" not in name:
        SCREAMING_SNAKE_CASE__ : int = name.replace("""proj""" ,"""projection""" )
    if "blocks" in name:
        SCREAMING_SNAKE_CASE__ : List[str] = name.replace("""blocks""" ,"""layer""" )
    if "mlp.fc1" in name:
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = name.replace("""mlp.fc1""" ,"""intermediate.dense""" )
    if "mlp.fc2" in name:
        SCREAMING_SNAKE_CASE__ : Any = name.replace("""mlp.fc2""" ,"""output.dense""" )
    if "norm1" in name:
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = name.replace("""norm1""" ,"""layernorm_before""" )
    if "norm2" in name:
        SCREAMING_SNAKE_CASE__ : str = name.replace("""norm2""" ,"""layernorm_after""" )
    if "scratch.output_conv" in name:
        SCREAMING_SNAKE_CASE__ : List[str] = name.replace("""scratch.output_conv""" ,"""head""" )
    if "scratch" in name:
        SCREAMING_SNAKE_CASE__ : List[Any] = name.replace("""scratch""" ,"""neck""" )
    if "layer1_rn" in name:
        SCREAMING_SNAKE_CASE__ : List[Any] = name.replace("""layer1_rn""" ,"""convs.0""" )
    if "layer2_rn" in name:
        SCREAMING_SNAKE_CASE__ : List[str] = name.replace("""layer2_rn""" ,"""convs.1""" )
    if "layer3_rn" in name:
        SCREAMING_SNAKE_CASE__ : Any = name.replace("""layer3_rn""" ,"""convs.2""" )
    if "layer4_rn" in name:
        SCREAMING_SNAKE_CASE__ : List[str] = name.replace("""layer4_rn""" ,"""convs.3""" )
    if "refinenet" in name:
        SCREAMING_SNAKE_CASE__ : List[str] = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        SCREAMING_SNAKE_CASE__ : List[Any] = name.replace(f'''refinenet{layer_idx}''' ,f'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
    if "out_conv" in name:
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = name.replace("""out_conv""" ,"""projection""" )
    if "resConfUnit1" in name:
        SCREAMING_SNAKE_CASE__ : List[str] = name.replace("""resConfUnit1""" ,"""residual_layer1""" )
    if "resConfUnit2" in name:
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = name.replace("""resConfUnit2""" ,"""residual_layer2""" )
    if "conv1" in name:
        SCREAMING_SNAKE_CASE__ : str = name.replace("""conv1""" ,"""convolution1""" )
    if "conv2" in name:
        SCREAMING_SNAKE_CASE__ : str = name.replace("""conv2""" ,"""convolution2""" )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        SCREAMING_SNAKE_CASE__ : str = name.replace("""pretrained.act_postprocess1.0.project.0""" ,"""neck.reassemble_stage.readout_projects.0.0""" )
    if "pretrained.act_postprocess2.0.project.0" in name:
        SCREAMING_SNAKE_CASE__ : Dict = name.replace("""pretrained.act_postprocess2.0.project.0""" ,"""neck.reassemble_stage.readout_projects.1.0""" )
    if "pretrained.act_postprocess3.0.project.0" in name:
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = name.replace("""pretrained.act_postprocess3.0.project.0""" ,"""neck.reassemble_stage.readout_projects.2.0""" )
    if "pretrained.act_postprocess4.0.project.0" in name:
        SCREAMING_SNAKE_CASE__ : List[Any] = name.replace("""pretrained.act_postprocess4.0.project.0""" ,"""neck.reassemble_stage.readout_projects.3.0""" )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        SCREAMING_SNAKE_CASE__ : Optional[Any] = name.replace("""pretrained.act_postprocess1.3""" ,"""neck.reassemble_stage.layers.0.projection""" )
    if "pretrained.act_postprocess1.4" in name:
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = name.replace("""pretrained.act_postprocess1.4""" ,"""neck.reassemble_stage.layers.0.resize""" )
    if "pretrained.act_postprocess2.3" in name:
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = name.replace("""pretrained.act_postprocess2.3""" ,"""neck.reassemble_stage.layers.1.projection""" )
    if "pretrained.act_postprocess2.4" in name:
        SCREAMING_SNAKE_CASE__ : List[str] = name.replace("""pretrained.act_postprocess2.4""" ,"""neck.reassemble_stage.layers.1.resize""" )
    if "pretrained.act_postprocess3.3" in name:
        SCREAMING_SNAKE_CASE__ : str = name.replace("""pretrained.act_postprocess3.3""" ,"""neck.reassemble_stage.layers.2.projection""" )
    if "pretrained.act_postprocess4.3" in name:
        SCREAMING_SNAKE_CASE__ : List[Any] = name.replace("""pretrained.act_postprocess4.3""" ,"""neck.reassemble_stage.layers.3.projection""" )
    if "pretrained.act_postprocess4.4" in name:
        SCREAMING_SNAKE_CASE__ : Optional[int] = name.replace("""pretrained.act_postprocess4.4""" ,"""neck.reassemble_stage.layers.3.resize""" )
    if "pretrained" in name:
        SCREAMING_SNAKE_CASE__ : Optional[int] = name.replace("""pretrained""" ,"""dpt""" )
    if "bn" in name:
        SCREAMING_SNAKE_CASE__ : Tuple = name.replace("""bn""" ,"""batch_norm""" )
    if "head" in name:
        SCREAMING_SNAKE_CASE__ : Optional[int] = name.replace("""head""" ,"""head.head""" )
    if "encoder.norm" in name:
        SCREAMING_SNAKE_CASE__ : str = name.replace("""encoder.norm""" ,"""layernorm""" )
    if "auxlayer" in name:
        SCREAMING_SNAKE_CASE__ : int = name.replace("""auxlayer""" ,"""auxiliary_head.head""" )

    return name


# Originally `read_in_q_k_v(config, state_dict)`: splits each fused timm
# qkv projection into separate query/key/value weights.
# NOTE(review): obfuscation collapsed the `state_dict[f"...query.weight"] = ...`
# targets into plain local assignments, so as written the slices are computed
# and discarded — the dict keys must be restored from the upstream script.
def lowercase_ ( _snake_case ,_snake_case ):
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        SCREAMING_SNAKE_CASE__ : List[str] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
        SCREAMING_SNAKE_CASE__ : str = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        SCREAMING_SNAKE_CASE__ : Optional[int] = in_proj_weight[: config.hidden_size, :]
        SCREAMING_SNAKE_CASE__ : Any = in_proj_bias[: config.hidden_size]
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        SCREAMING_SNAKE_CASE__ : Optional[Any] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        SCREAMING_SNAKE_CASE__ : Dict = in_proj_weight[
            -config.hidden_size :, :
        ]
        SCREAMING_SNAKE_CASE__ : Optional[Any] = in_proj_bias[-config.hidden_size :]


# Originally `prepare_img()`: downloads the standard COCO test image used to
# sanity-check converted vision models.
def lowercase_ ( ):
    SCREAMING_SNAKE_CASE__ : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    SCREAMING_SNAKE_CASE__ : str = Image.open(requests.get(_snake_case ,stream=_snake_case ).raw )
    return im


# Originally `convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path,
# push_to_hub, model_name)`: downloads the original weights, renames/splits
# them, verifies model outputs on a test image, then saves (and optionally
# pushes) the converted model and image processor.
@torch.no_grad()
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ):
    SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = get_dpt_config(_snake_case )
    # load original state_dict from URL
    SCREAMING_SNAKE_CASE__ : Optional[int] = torch.hub.load_state_dict_from_url(_snake_case ,map_location="""cpu""" )
    # remove certain keys
    remove_ignore_keys_(_snake_case )
    # rename keys
    for key in state_dict.copy().keys():
        SCREAMING_SNAKE_CASE__ : Tuple = state_dict.pop(_snake_case )
        SCREAMING_SNAKE_CASE__ : List[Any] = val
    # read in qkv matrices
    read_in_q_k_v(_snake_case ,_snake_case )

    # load HuggingFace model
    SCREAMING_SNAKE_CASE__ : Optional[Any] = DPTForSemanticSegmentation(_snake_case ) if """ade""" in checkpoint_url else DPTForDepthEstimation(_snake_case )
    model.load_state_dict(_snake_case )
    model.eval()

    # Check outputs on an image
    SCREAMING_SNAKE_CASE__ : Tuple = 480 if """ade""" in checkpoint_url else 384
    SCREAMING_SNAKE_CASE__ : str = DPTImageProcessor(size=_snake_case )

    SCREAMING_SNAKE_CASE__ : List[str] = prepare_img()
    SCREAMING_SNAKE_CASE__ : Dict = image_processor(_snake_case ,return_tensors="""pt""" )

    # forward pass
    SCREAMING_SNAKE_CASE__ : Optional[int] = model(**_snake_case ).logits if """ade""" in checkpoint_url else model(**_snake_case ).predicted_depth

    # Assert logits
    SCREAMING_SNAKE_CASE__ : Any = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]] )
    if "ade" in checkpoint_url:
        SCREAMING_SNAKE_CASE__ : int = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]] )
    assert outputs.shape == torch.Size(_snake_case )
    assert (
        torch.allclose(outputs[0, 0, :3, :3] ,_snake_case ,atol=1E-4 )
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3] ,_snake_case )
    )

    Path(_snake_case ).mkdir(exist_ok=_snake_case )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(_snake_case )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(_snake_case )

    if push_to_hub:
        print("""Pushing model to hub...""" )
        model.push_to_hub(
            repo_path_or_name=Path(_snake_case ,_snake_case ) ,organization="""nielsr""" ,commit_message="""Add model""" ,use_temp_dir=_snake_case ,)
        image_processor.push_to_hub(
            repo_path_or_name=Path(_snake_case ,_snake_case ) ,organization="""nielsr""" ,commit_message="""Add image processor""" ,use_temp_dir=_snake_case ,)


if __name__ == "__main__":
    UpperCAmelCase__ : int = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint_url',
        default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
        type=str,
        help='URL of the original DPT checkpoint you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=str,
        required=True,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
    )
    parser.add_argument(
        '--model_name',
        default='dpt-large',
        type=str,
        help='Name of the model, in case you\'re pushing to the hub.',
    )

    UpperCAmelCase__ : Any = parser.parse_args()
    convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
25
"""Tests for the DeepFloyd IF pipelines (text-to-image, img2img, inpainting
and their super-resolution stages).

NOTE(review): identifier obfuscation damaged this module: every test method
is named ``__magic_name__`` (each shadowing the previous within its class),
several methods declare duplicate ``SCREAMING_SNAKE_CASE__`` parameters (a
SyntaxError), and the final helper is defined as ``lowercase_`` while being
called as ``_start_torch_memory_measurement`` inside the slow tests. Code is
kept byte-identical; the original names must be restored before this module
can run under pytest.
"""
import gc
import random
import unittest

import torch

from diffusers import (
    IFImgaImgPipeline,
    IFImgaImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin


@skip_mps
class lowerCAmelCase_ (a__ , a__ , unittest.TestCase ):
    """Fast (dummy-component) tests for the base IFPipeline.

    The mangled bases `a__ , a__` presumably were PipelineTesterMixin and
    IFPipelineTesterMixin — TODO confirm against upstream.
    """

    # Pipeline class and parameter sets exercised by the shared mixin tests.
    __UpperCamelCase : Optional[Any] = IFPipeline
    __UpperCamelCase : Dict = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
    __UpperCamelCase : Any = TEXT_TO_IMAGE_BATCH_PARAMS
    __UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'''latents'''}

    def __magic_name__ (self ) -> Union[str, Any]:
        """Delegate dummy component construction to the IF tester mixin."""
        return self._get_dummy_components()

    def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0 ) -> List[Any]:
        """Build deterministic pipeline inputs (device, seed=0) — was
        `get_dummy_inputs`."""
        if str(SCREAMING_SNAKE_CASE__ ).startswith("""mps""" ):
            # mps does not support device-bound generators.
            SCREAMING_SNAKE_CASE__ : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
        else:
            SCREAMING_SNAKE_CASE__ : Any = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
        SCREAMING_SNAKE_CASE__ : int = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs

    def __magic_name__ (self ) -> Tuple:
        """Round-trip save/load with optional components removed."""
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
    def __magic_name__ (self ) -> List[str]:
        """Save/load in float16 within a loose tolerance."""
        super().test_save_load_floataa(expected_max_diff=1E-1 )

    def __magic_name__ (self ) -> List[Any]:
        """Sliced attention must match the unsliced forward pass."""
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )

    def __magic_name__ (self ) -> Tuple:
        """Plain local save/load round-trip."""
        self._test_save_load_local()

    def __magic_name__ (self ) -> Dict:
        """Batched inference must match single-sample inference."""
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 , )

    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() ,
        reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def __magic_name__ (self ) -> Optional[int]:
        """xFormers attention must match the default attention processor."""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )


@slow
@require_torch_gpu
class lowerCAmelCase_ (unittest.TestCase ):
    """Slow end-to-end GPU tests running the real DeepFloyd IF checkpoints
    through all three task variants plus their stage-II upscalers."""

    def __magic_name__ (self ) -> Dict:
        """Free GPU memory between tests — was `tearDown`."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __magic_name__ (self ) -> Optional[int]:
        """Load stage I + stage II once, precompute prompt embeddings, then
        reuse the components across text-to-image, img2img and inpainting."""
        # if
        SCREAMING_SNAKE_CASE__ : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa )
        SCREAMING_SNAKE_CASE__ : Dict = IFSuperResolutionPipeline.from_pretrained(
            """DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
        # pre compute text embeddings and remove T5 to save memory
        pipe_a.text_encoder.to("""cuda""" )
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" )
        del pipe_a.tokenizer
        del pipe_a.text_encoder
        gc.collect()
        SCREAMING_SNAKE_CASE__ : List[str] = None
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
        pipe_a.enable_model_cpu_offload()
        pipe_a.enable_model_cpu_offload()
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        pipe_a.remove_all_hooks()
        pipe_a.remove_all_hooks()
        # img2img
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components )
        SCREAMING_SNAKE_CASE__ : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
        pipe_a.enable_model_cpu_offload()
        pipe_a.enable_model_cpu_offload()
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if_imgaimg(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        pipe_a.remove_all_hooks()
        pipe_a.remove_all_hooks()
        # inpainting
        SCREAMING_SNAKE_CASE__ : Optional[Any] = IFInpaintingPipeline(**pipe_a.components )
        SCREAMING_SNAKE_CASE__ : int = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
        pipe_a.enable_model_cpu_offload()
        pipe_a.enable_model_cpu_offload()
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if_inpainting(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

    def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
        """Text-to-image: run stage I then stage II, checking shapes, peak
        memory and mean pixel difference vs. reference images — was
        `_test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)`."""
        # pipeline 1
        _start_torch_memory_measurement()
        SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
        SCREAMING_SNAKE_CASE__ : Dict = pipe_a(
            prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , )
        SCREAMING_SNAKE_CASE__ : int = output.images[0]
        assert image.shape == (64, 64, 3)
        SCREAMING_SNAKE_CASE__ : Any = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        SCREAMING_SNAKE_CASE__ : Optional[int] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" )
        assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        # pipeline 2
        _start_torch_memory_measurement()
        SCREAMING_SNAKE_CASE__ : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
        SCREAMING_SNAKE_CASE__ : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
        SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe_a(
            prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , )
        SCREAMING_SNAKE_CASE__ : List[str] = output.images[0]
        assert image.shape == (2_56, 2_56, 3)
        SCREAMING_SNAKE_CASE__ : List[Any] = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        SCREAMING_SNAKE_CASE__ : int = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" )
        assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

    def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str:
        """Img2img: stage I then stage II (which also takes the original
        image) — was `_test_if_img2img`."""
        # pipeline 1
        _start_torch_memory_measurement()
        SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
        SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
        SCREAMING_SNAKE_CASE__ : Any = pipe_a(
            prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , )
        SCREAMING_SNAKE_CASE__ : Dict = output.images[0]
        assert image.shape == (64, 64, 3)
        SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        SCREAMING_SNAKE_CASE__ : str = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" )
        assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        # pipeline 2
        _start_torch_memory_measurement()
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
        SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
        SCREAMING_SNAKE_CASE__ : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
        SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe_a(
            prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , )
        SCREAMING_SNAKE_CASE__ : List[Any] = output.images[0]
        assert image.shape == (2_56, 2_56, 3)
        SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        SCREAMING_SNAKE_CASE__ : str = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" )
        assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

    def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
        """Inpainting: stage I with image+mask, stage II with image, mask and
        original image — was `_test_if_inpainting`."""
        # pipeline 1
        _start_torch_memory_measurement()
        SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
        SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ )
        SCREAMING_SNAKE_CASE__ : Dict = torch.Generator(device="""cpu""" ).manual_seed(0 )
        SCREAMING_SNAKE_CASE__ : List[str] = pipe_a(
            prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , )
        SCREAMING_SNAKE_CASE__ : List[Any] = output.images[0]
        assert image.shape == (64, 64, 3)
        SCREAMING_SNAKE_CASE__ : Tuple = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        SCREAMING_SNAKE_CASE__ : Optional[Any] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" )
        assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        # pipeline 2
        _start_torch_memory_measurement()
        SCREAMING_SNAKE_CASE__ : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
        SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
        SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
        SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ )
        SCREAMING_SNAKE_CASE__ : int = pipe_a(
            prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , )
        SCREAMING_SNAKE_CASE__ : Dict = output.images[0]
        assert image.shape == (2_56, 2_56, 3)
        SCREAMING_SNAKE_CASE__ : List[str] = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        SCREAMING_SNAKE_CASE__ : Any = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" )
        assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )


# NOTE(review): called above as `_start_torch_memory_measurement` — the
# obfuscated name `lowercase_` must be restored for the tests to run.
def lowercase_ ( ):
    """Reset CUDA memory counters so each sub-test measures its own peak."""
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
25
1
"""simple docstring""" import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =0 def _lowerCAmelCase ( self ) -> str: _lowerCAmelCase =AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Union[str, Any]: with tempfile.TemporaryDirectory() as tmpdirname: _lowerCAmelCase =Path(__UpperCAmelCase ) / """preprocessor_config.json""" _lowerCAmelCase =Path(__UpperCAmelCase ) / """config.json""" json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__UpperCAmelCase , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(__UpperCAmelCase , """w""" ) ) _lowerCAmelCase =AutoImageProcessor.from_pretrained(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Union[str, Any]: # Ensure we can load the image processor from the feature extractor config with tempfile.TemporaryDirectory() as tmpdirname: _lowerCAmelCase =Path(__UpperCAmelCase ) / """preprocessor_config.json""" _lowerCAmelCase =Path(__UpperCAmelCase ) / """config.json""" json.dump( {"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(__UpperCAmelCase , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(__UpperCAmelCase , 
"""w""" ) ) _lowerCAmelCase =AutoImageProcessor.from_pretrained(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Dict: with tempfile.TemporaryDirectory() as tmpdirname: _lowerCAmelCase =CLIPConfig() # Create a dummy config file with image_proceesor_type _lowerCAmelCase =Path(__UpperCAmelCase ) / """preprocessor_config.json""" _lowerCAmelCase =Path(__UpperCAmelCase ) / """config.json""" json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__UpperCAmelCase , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(__UpperCAmelCase , """w""" ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally _lowerCAmelCase =AutoImageProcessor.from_pretrained(__UpperCAmelCase ).to_dict() config_dict.pop("""image_processor_type""" ) _lowerCAmelCase =CLIPImageProcessor(**__UpperCAmelCase ) # save in new folder model_config.save_pretrained(__UpperCAmelCase ) config.save_pretrained(__UpperCAmelCase ) _lowerCAmelCase =AutoImageProcessor.from_pretrained(__UpperCAmelCase ) # make sure private variable is not incorrectly saved _lowerCAmelCase =json.loads(config.to_json_string() ) self.assertTrue("""_processor_class""" not in dict_as_saved ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Tuple: with tempfile.TemporaryDirectory() as tmpdirname: _lowerCAmelCase =Path(__UpperCAmelCase ) / """preprocessor_config.json""" json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__UpperCAmelCase , """w""" ) , ) _lowerCAmelCase =AutoImageProcessor.from_pretrained(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Dict: with self.assertRaisesRegex( __UpperCAmelCase , """clip-base is not a local folder and is not a valid model identifier""" ): 
_lowerCAmelCase =AutoImageProcessor.from_pretrained("""clip-base""" ) def _lowerCAmelCase ( self ) -> Tuple: with self.assertRaisesRegex( __UpperCAmelCase , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): _lowerCAmelCase =AutoImageProcessor.from_pretrained(__UpperCAmelCase , revision="""aaaaaa""" ) def _lowerCAmelCase ( self ) -> List[str]: with self.assertRaisesRegex( __UpperCAmelCase , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ): _lowerCAmelCase =AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" ) def _lowerCAmelCase ( self ) -> Optional[Any]: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__UpperCAmelCase ): _lowerCAmelCase =AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(__UpperCAmelCase ): _lowerCAmelCase =AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__UpperCAmelCase ) _lowerCAmelCase =AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__UpperCAmelCase ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) # Test image processor can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__UpperCAmelCase ) _lowerCAmelCase =AutoImageProcessor.from_pretrained(__UpperCAmelCase , trust_remote_code=__UpperCAmelCase ) self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" ) def _lowerCAmelCase ( self ) -> Dict: try: AutoConfig.register("""custom""" , __UpperCAmelCase ) AutoImageProcessor.register(__UpperCAmelCase , __UpperCAmelCase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__UpperCAmelCase ): AutoImageProcessor.register(__UpperCAmelCase , __UpperCAmelCase ) with tempfile.TemporaryDirectory() as tmpdirname: _lowerCAmelCase =Path(__UpperCAmelCase ) / """preprocessor_config.json""" _lowerCAmelCase =Path(__UpperCAmelCase ) / """config.json""" json.dump( {"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(__UpperCAmelCase , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(__UpperCAmelCase , """w""" ) ) _lowerCAmelCase =CustomImageProcessor.from_pretrained(__UpperCAmelCase ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__UpperCAmelCase ) _lowerCAmelCase =AutoImageProcessor.from_pretrained(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def _lowerCAmelCase ( self ) -> Tuple: class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = True try: AutoConfig.register("""custom""" , __UpperCAmelCase ) AutoImageProcessor.register(__UpperCAmelCase , __UpperCAmelCase ) # If remote code is not set, the default is to use local 
_lowerCAmelCase =AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. _lowerCAmelCase =AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__UpperCAmelCase ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub _lowerCAmelCase =AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__UpperCAmelCase ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) self.assertTrue(not hasattr(__UpperCAmelCase , """is_local""" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
341
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __A = logging.get_logger(__name__) __A = '▁' __A = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'} __A = { 'vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model', }, 'monolingual_vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt', }, } __A = {'vinai/bartpho-syllable': 1024} class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = VOCAB_FILES_NAMES lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase = ['''input_ids''', '''attention_mask'''] def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None: # Mask token behave like a normal word, i.e. 
include the space before it _lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token _lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , ) _lowerCAmelCase =vocab_file _lowerCAmelCase =monolingual_vocab_file _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__UpperCAmelCase ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility _lowerCAmelCase ={} _lowerCAmelCase =0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids: _lowerCAmelCase =cnt cnt += 1 with open(__UpperCAmelCase , """r""" , encoding="""utf-8""" ) as f: for line in f.readlines(): _lowerCAmelCase =line.strip().split()[0] _lowerCAmelCase =len(self.fairseq_tokens_to_ids ) if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids: _lowerCAmelCase =len(self.fairseq_tokens_to_ids ) _lowerCAmelCase ={v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> Dict: _lowerCAmelCase =self.__dict__.copy() _lowerCAmelCase =None _lowerCAmelCase =self.sp_model.serialized_model_proto() return state def __setstate__( self , __UpperCAmelCase ) -> List[Any]: _lowerCAmelCase =d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _lowerCAmelCase ={} _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + 
token_ids_a + [self.sep_token_id] _lowerCAmelCase =[self.cls_token_id] _lowerCAmelCase =[self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(__UpperCAmelCase )) + [1] return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1] def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _lowerCAmelCase =[self.sep_token_id] _lowerCAmelCase =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _lowerCAmelCase ( self ) -> Union[str, Any]: return len(self.fairseq_ids_to_tokens ) def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]: return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]: return self.fairseq_ids_to_tokens[index] def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]: _lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip() return out_string def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(__UpperCAmelCase ): logger.error(f'''Vocabulary path 
({save_directory}) should be a directory''' ) return _lowerCAmelCase =os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) _lowerCAmelCase =os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCAmelCase , """wb""" ) as fi: _lowerCAmelCase =self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( __UpperCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f'''{str(__UpperCAmelCase )} \n''' ) return out_vocab_file, out_monolingual_vocab_file
341
1
"""Configuration for MaskFormer: composes a backbone config (swin/resnet) with a
transformer decoder config (detr) plus matcher/loss hyper-parameters.

Rewritten from an obfuscated dump whose placeholder assignments left
``backbone_config``/``decoder_config``/``output`` unbound at their use sites.
"""
import copy
from typing import Dict, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig


MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)


class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            # A plain dict (e.g. from a saved config) is rehydrated via the auto mapping.
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        # Mirror the decoder's attention geometry at the top level for generic code paths.
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        """Instantiate from already-built backbone and decoder configs."""
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        """Serialize, expanding the nested configs into plain dicts."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
205
"""PABEE (Patience-based Early Exit) BERT: every layer gets its own classifier;
at inference the model stops once `patience` consecutive layers agree.

Rewritten from an obfuscated dump whose placeholder assignments left
``res``, ``logits``, ``patient_result``, ``total_loss`` etc. unbound, and whose
duplicate method names shadowed each other.
"""
import logging

import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
    BERT_INPUTS_DOCSTRING,
    BERT_START_DOCSTRING,
    BertEncoder,
    BertModel,
    BertPreTrainedModel,
)


logger = logging.getLogger(__name__)


class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        """Run a single encoder layer (used for layer-by-layer early exit)."""
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        hidden_states = layer_outputs[0]

        return hidden_states


@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()
        # Early-exit controls and running statistics for log_stats().
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        """Print average exit layer and implied speed-up over running all layers."""
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        """Return the per-layer classifier outputs (training: all layers;
        inference: either the final layer or the patience-based early exit)."""
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            # Training: collect one prediction per layer so each head gets a loss.
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:
            # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            # Patience-based early exit: stop once `patience` consecutive layers agree.
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    # Regression: agreement means predictions within the threshold.
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    # Classification: agreement means identical argmax labels.
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res


@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # One classification head per encoder layer, enabling per-layer exits.
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        """Return ``(loss?, last_logits)``; loss is the depth-weighted average
        of per-layer losses (deeper layers weighted more heavily)."""
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    #  We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
205
1
"""simple docstring""" import json import os import tempfile from transformers.testing_utils import check_json_file_has_correct_format class UpperCamelCase_ : _A : Optional[Any] = None def UpperCamelCase_ ( self ) -> Optional[int]: """simple docstring""" UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict ) UpperCAmelCase = json.loads(feat_extract.to_json_string() ) for key, value in self.feat_extract_dict.items(): self.assertEqual(obj[key] , snake_case__ ) def UpperCamelCase_ ( self ) -> List[Any]: """simple docstring""" UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase = os.path.join(snake_case__ , """feat_extract.json""" ) feat_extract_first.to_json_file(snake_case__ ) UpperCAmelCase = self.feature_extraction_class.from_json_file(snake_case__ ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def UpperCamelCase_ ( self ) -> Optional[Any]: """simple docstring""" UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase = feat_extract_first.save_pretrained(snake_case__ )[0] check_json_file_has_correct_format(snake_case__ ) UpperCAmelCase = self.feature_extraction_class.from_pretrained(snake_case__ ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def UpperCamelCase_ ( self ) -> Optional[int]: """simple docstring""" UpperCAmelCase = self.feature_extraction_class() self.assertIsNotNone(snake_case__ )
363
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( BertTokenizer, ViltConfig, ViltForImageAndTextRetrieval, ViltForImagesAndTextClassification, ViltForMaskedLM, ViltForQuestionAnswering, ViltImageProcessor, ViltProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ : Dict = logging.get_logger(__name__) def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=False ): '''simple docstring''' UpperCAmelCase = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''transformer.blocks.{i}.norm1.weight''', F'''vilt.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''transformer.blocks.{i}.norm1.bias''', F'''vilt.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (F'''transformer.blocks.{i}.attn.proj.weight''', F'''vilt.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (F'''transformer.blocks.{i}.attn.proj.bias''', F'''vilt.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''transformer.blocks.{i}.norm2.weight''', F'''vilt.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''transformer.blocks.{i}.norm2.bias''', F'''vilt.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append( (F'''transformer.blocks.{i}.mlp.fc1.weight''', F'''vilt.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''transformer.blocks.{i}.mlp.fc1.bias''', F'''vilt.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.weight''', F'''vilt.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.bias''', F'''vilt.encoder.layer.{i}.output.dense.bias''') ) # embeddings 
rename_keys.extend( [ # text embeddings ("""text_embeddings.word_embeddings.weight""", """vilt.embeddings.text_embeddings.word_embeddings.weight"""), ( """text_embeddings.position_embeddings.weight""", """vilt.embeddings.text_embeddings.position_embeddings.weight""", ), ("""text_embeddings.position_ids""", """vilt.embeddings.text_embeddings.position_ids"""), ( """text_embeddings.token_type_embeddings.weight""", """vilt.embeddings.text_embeddings.token_type_embeddings.weight""", ), ("""text_embeddings.LayerNorm.weight""", """vilt.embeddings.text_embeddings.LayerNorm.weight"""), ("""text_embeddings.LayerNorm.bias""", """vilt.embeddings.text_embeddings.LayerNorm.bias"""), # patch embeddings ("""transformer.cls_token""", """vilt.embeddings.cls_token"""), ("""transformer.patch_embed.proj.weight""", """vilt.embeddings.patch_embeddings.projection.weight"""), ("""transformer.patch_embed.proj.bias""", """vilt.embeddings.patch_embeddings.projection.bias"""), ("""transformer.pos_embed""", """vilt.embeddings.position_embeddings"""), # token type embeddings ("""token_type_embeddings.weight""", """vilt.embeddings.token_type_embeddings.weight"""), ] ) # final layernorm + pooler rename_keys.extend( [ ("""transformer.norm.weight""", """vilt.layernorm.weight"""), ("""transformer.norm.bias""", """vilt.layernorm.bias"""), ("""pooler.dense.weight""", """vilt.pooler.dense.weight"""), ("""pooler.dense.bias""", """vilt.pooler.dense.bias"""), ] ) # classifier head(s) if vqa_model: # classification head rename_keys.extend( [ ("""vqa_classifier.0.weight""", """classifier.0.weight"""), ("""vqa_classifier.0.bias""", """classifier.0.bias"""), ("""vqa_classifier.1.weight""", """classifier.1.weight"""), ("""vqa_classifier.1.bias""", """classifier.1.bias"""), ("""vqa_classifier.3.weight""", """classifier.3.weight"""), ("""vqa_classifier.3.bias""", """classifier.3.bias"""), ] ) elif nlvr_model: # classification head rename_keys.extend( [ ("""nlvr2_classifier.0.weight""", """classifier.0.weight"""), 
("""nlvr2_classifier.0.bias""", """classifier.0.bias"""), ("""nlvr2_classifier.1.weight""", """classifier.1.weight"""), ("""nlvr2_classifier.1.bias""", """classifier.1.bias"""), ("""nlvr2_classifier.3.weight""", """classifier.3.weight"""), ("""nlvr2_classifier.3.bias""", """classifier.3.bias"""), ] ) else: pass return rename_keys def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase ): '''simple docstring''' for i in range(config.num_hidden_layers ): UpperCAmelCase = """vilt.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCAmelCase = state_dict.pop(F'''transformer.blocks.{i}.attn.qkv.weight''' ) UpperCAmelCase = state_dict.pop(F'''transformer.blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase = in_proj_weight[ : config.hidden_size, : ] UpperCAmelCase = in_proj_bias[: config.hidden_size] UpperCAmelCase = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCAmelCase = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCAmelCase = in_proj_weight[ -config.hidden_size :, : ] UpperCAmelCase = in_proj_bias[-config.hidden_size :] def _lowerCAmelCase ( lowerCAmelCase ): '''simple docstring''' UpperCAmelCase = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(lowerCAmelCase , lowerCAmelCase ) def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): '''simple docstring''' UpperCAmelCase = dct.pop(lowerCAmelCase ) UpperCAmelCase = val @torch.no_grad() def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase ): '''simple docstring''' UpperCAmelCase = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=lowerCAmelCase ) UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = False if "vqa" in checkpoint_url: UpperCAmelCase = True UpperCAmelCase = 3129 UpperCAmelCase = """huggingface/label-files""" UpperCAmelCase = """vqa2-id2label.json""" UpperCAmelCase = 
json.load(open(hf_hub_download(lowerCAmelCase , lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) ) UpperCAmelCase = {int(lowerCAmelCase ): v for k, v in idalabel.items()} UpperCAmelCase = idalabel UpperCAmelCase = {v: k for k, v in idalabel.items()} UpperCAmelCase = ViltForQuestionAnswering(lowerCAmelCase ) elif "nlvr" in checkpoint_url: UpperCAmelCase = True UpperCAmelCase = 2 UpperCAmelCase = {0: """False""", 1: """True"""} UpperCAmelCase = {v: k for k, v in config.idalabel.items()} UpperCAmelCase = 3 UpperCAmelCase = ViltForImagesAndTextClassification(lowerCAmelCase ) elif "irtr" in checkpoint_url: UpperCAmelCase = True UpperCAmelCase = ViltForImageAndTextRetrieval(lowerCAmelCase ) elif "mlm_itm" in checkpoint_url: UpperCAmelCase = True UpperCAmelCase = ViltForMaskedLM(lowerCAmelCase ) else: raise ValueError("""Unknown model type""" ) # load state_dict of original model, remove and rename some keys UpperCAmelCase = torch.hub.load_state_dict_from_url(lowerCAmelCase , map_location="""cpu""" )["""state_dict"""] UpperCAmelCase = create_rename_keys(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) for src, dest in rename_keys: rename_key(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) read_in_q_k_v(lowerCAmelCase , lowerCAmelCase ) if mlm_model or irtr_model: UpperCAmelCase = ["""itm_score.fc.weight""", """itm_score.fc.bias"""] for k in ignore_keys: state_dict.pop(lowerCAmelCase , lowerCAmelCase ) # load state dict into HuggingFace model model.eval() if mlm_model: UpperCAmelCase , UpperCAmelCase = model.load_state_dict(lowerCAmelCase , strict=lowerCAmelCase ) assert missing_keys == ["mlm_score.decoder.bias"] else: model.load_state_dict(lowerCAmelCase ) # Define processor UpperCAmelCase = ViltImageProcessor(size=384 ) UpperCAmelCase = BertTokenizer.from_pretrained("""bert-base-uncased""" ) UpperCAmelCase = ViltProcessor(lowerCAmelCase , lowerCAmelCase ) # Forward pass on example inputs (image + text) if nlvr_model: UpperCAmelCase = 
Image.open(requests.get("""https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg""" , stream=lowerCAmelCase ).raw ) UpperCAmelCase = Image.open(requests.get("""https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg""" , stream=lowerCAmelCase ).raw ) UpperCAmelCase = ( """The left image contains twice the number of dogs as the right image, and at least two dogs in total are""" """ standing.""" ) UpperCAmelCase = processor(lowerCAmelCase , lowerCAmelCase , return_tensors="""pt""" ) UpperCAmelCase = processor(lowerCAmelCase , lowerCAmelCase , return_tensors="""pt""" ) UpperCAmelCase = model( input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , ) else: UpperCAmelCase = Image.open(requests.get("""http://images.cocodataset.org/val2017/000000039769.jpg""" , stream=lowerCAmelCase ).raw ) if mlm_model: UpperCAmelCase = """a bunch of [MASK] laying on a [MASK].""" else: UpperCAmelCase = """How many cats are there?""" UpperCAmelCase = processor(lowerCAmelCase , lowerCAmelCase , return_tensors="""pt""" ) UpperCAmelCase = model(**lowerCAmelCase ) # Verify outputs if mlm_model: UpperCAmelCase = torch.Size([1, 11, 30522] ) UpperCAmelCase = torch.tensor([-12.50_61, -12.51_23, -12.51_74] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , lowerCAmelCase , atol=1e-4 ) # verify masked token prediction equals "cats" UpperCAmelCase = outputs.logits[0, 4, :].argmax(-1 ).item() assert tokenizer.decode([predicted_id] ) == "cats" elif vqa_model: UpperCAmelCase = torch.Size([1, 3129] ) UpperCAmelCase = torch.tensor([-15.94_95, -18.14_72, -10.30_41] ) assert torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1e-4 ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , lowerCAmelCase , atol=1e-4 ) # verify vqa prediction equals "2" UpperCAmelCase = outputs.logits.argmax(-1 ).item() assert model.config.idalabel[predicted_idx] == "2" elif nlvr_model: 
UpperCAmelCase = torch.Size([1, 2] ) UpperCAmelCase = torch.tensor([-2.87_21, 2.12_91] ) assert torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1e-4 ) assert outputs.logits.shape == expected_shape Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase ) print(F'''Saving model and processor to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCAmelCase ) processor.save_pretrained(lowerCAmelCase ) if __name__ == "__main__": lowerCAmelCase_ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''', type=str, help='''URL of the checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) lowerCAmelCase_ : Optional[Any] = parser.parse_args() convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
248
0
'''simple docstring''' import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer from ...utils import logging lowercase__ = logging.get_logger(__name__) lowercase__ = """▁""" lowercase__ = { """vocab_file""": """vocab.json""", """spm_file""": """sentencepiece.bpe.model""", """tokenizer_config_file""": """tokenizer_config.json""", } lowercase__ = { """vocab_file""": { """facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""", """facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""", }, """spm_file""": { """facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""", """facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""", }, """tokenizer_config_file""": { """facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""", """facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""", }, } lowercase__ = { """facebook/m2m100_418M""": 1024, } # fmt: off lowercase__ = { """m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", 
"""no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""], """wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""] } class A_ ( _snake_case ): '''simple docstring''' UpperCAmelCase_ : int = VOCAB_FILES_NAMES UpperCAmelCase_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase_ : int = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase_ : List[Any] = ['input_ids', 'attention_mask'] UpperCAmelCase_ : List[int] = [] UpperCAmelCase_ : List[int] = [] def __init__( self : Tuple , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : Dict=None , lowercase_ : str=None , lowercase_ : List[str]="<s>" , lowercase_ : Optional[int]="</s>" , lowercase_ : List[Any]="</s>" , lowercase_ : Union[str, Any]="<pad>" , lowercase_ : List[Any]="<unk>" , lowercase_ : List[Any]="m2m100" , lowercase_ : Optional[int] = None , lowercase_ : Union[str, Any]=8 , **lowercase_ : Union[str, Any] , ) -> int: UpperCAmelCase : Dict = {} if sp_model_kwargs is None else sp_model_kwargs UpperCAmelCase : Dict = language_codes UpperCAmelCase : Any = FAIRSEQ_LANGUAGE_CODES[language_codes] UpperCAmelCase : Any = {lang_code: f"""__{lang_code}__""" for lang_code in fairseq_language_code} UpperCAmelCase : Union[str, Any] = kwargs.get('additional_special_tokens' , [] ) kwargs["additional_special_tokens"] += [ self.get_lang_token(__lowerCAmelCase ) for lang_code in fairseq_language_code if self.get_lang_token(__lowerCAmelCase ) not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=__lowerCAmelCase , tgt_lang=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , 
pad_token=__lowerCAmelCase , language_codes=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=__lowerCAmelCase , **__lowerCAmelCase , ) UpperCAmelCase : int = vocab_file UpperCAmelCase : Any = load_json(__lowerCAmelCase ) UpperCAmelCase : str = {v: k for k, v in self.encoder.items()} UpperCAmelCase : Optional[Any] = spm_file UpperCAmelCase : Union[str, Any] = load_spm(__lowerCAmelCase , self.sp_model_kwargs ) UpperCAmelCase : Union[str, Any] = len(self.encoder ) UpperCAmelCase : Optional[Any] = { self.get_lang_token(__lowerCAmelCase ): self.encoder_size + i for i, lang_code in enumerate(__lowerCAmelCase ) } UpperCAmelCase : Tuple = {lang_code: self.encoder_size + i for i, lang_code in enumerate(__lowerCAmelCase )} UpperCAmelCase : Any = {v: k for k, v in self.lang_token_to_id.items()} UpperCAmelCase : Dict = src_lang if src_lang is not None else 'en' UpperCAmelCase : str = tgt_lang UpperCAmelCase : Any = self.get_lang_id(self._src_lang ) self.set_src_lang_special_tokens(self._src_lang ) UpperCAmelCase : Optional[int] = num_madeup_words @property def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple: return len(self.encoder ) + len(self.lang_token_to_id ) @property def UpperCAmelCase_ ( self : Dict ) -> List[Any]: return self._src_lang @src_lang.setter def UpperCAmelCase_ ( self : int , lowercase_ : int ) -> Optional[int]: UpperCAmelCase : int = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def UpperCAmelCase_ ( self : Any , lowercase_ : List[str] ) -> Tuple: return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase ) def UpperCAmelCase_ ( self : List[str] , lowercase_ : Tuple ) -> Optional[int]: if token in self.lang_token_to_id: return self.lang_token_to_id[token] return self.encoder.get(__lowerCAmelCase , self.encoder[self.unk_token] ) def UpperCAmelCase_ ( self : Tuple , lowercase_ : str ) -> Dict: if index in self.id_to_lang_token: return self.id_to_lang_token[index] return 
self.decoder.get(__lowerCAmelCase , self.unk_token ) def UpperCAmelCase_ ( self : List[str] , lowercase_ : Any ) -> Any: UpperCAmelCase : Optional[Any] = [] UpperCAmelCase : List[str] = '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(__lowerCAmelCase ) + token UpperCAmelCase : List[str] = [] else: current_sub_tokens.append(__lowerCAmelCase ) out_string += self.sp_model.decode(__lowerCAmelCase ) return out_string.strip() def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Any , lowercase_ : Union[str, Any] = None , lowercase_ : Optional[Any] = False ) -> Union[str, Any]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase ) UpperCAmelCase : Any = [1] * len(self.prefix_tokens ) UpperCAmelCase : Union[str, Any] = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(__lowerCAmelCase )) + suffix_ones return prefix_ones + ([0] * len(__lowerCAmelCase )) + ([0] * len(__lowerCAmelCase )) + suffix_ones def UpperCAmelCase_ ( self : Dict , lowercase_ : str , lowercase_ : int = None ) -> str: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def UpperCAmelCase_ ( self : Union[str, Any] ) -> str: UpperCAmelCase : int = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : List[Any] ) -> Any: UpperCAmelCase : str = self.__dict__.copy() UpperCAmelCase : Any = None return state def __setstate__( self : Optional[Any] , lowercase_ : Dict ) -> Optional[int]: UpperCAmelCase : Union[str, Any] = d # for backward 
compatibility if not hasattr(self , 'sp_model_kwargs' ): UpperCAmelCase : Any = {} UpperCAmelCase : Optional[Any] = load_spm(self.spm_file , self.sp_model_kwargs ) def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : int , lowercase_ : Optional[int] = None ) -> int: UpperCAmelCase : List[str] = Path(__lowerCAmelCase ) if not save_dir.is_dir(): raise OSError(f"""{save_directory} should be a directory""" ) UpperCAmelCase : Tuple = save_dir / ( (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file'] ) UpperCAmelCase : str = save_dir / ( (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file'] ) save_json(self.encoder , __lowerCAmelCase ) if os.path.abspath(self.spm_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , __lowerCAmelCase ) elif not os.path.isfile(self.spm_file ): with open(__lowerCAmelCase , 'wb' ) as fi: UpperCAmelCase : int = self.sp_model.serialized_model_proto() fi.write(__lowerCAmelCase ) return (str(__lowerCAmelCase ), str(__lowerCAmelCase )) def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : int , lowercase_ : int = "en" , lowercase_ : Tuple = None , lowercase_ : str = "ro" , **lowercase_ : List[str] , ) -> Optional[Any]: UpperCAmelCase : Any = src_lang UpperCAmelCase : Union[str, Any] = tgt_lang self.set_src_lang_special_tokens(self.src_lang ) return super().prepare_seqaseq_batch(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) def UpperCAmelCase_ ( self : int , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , **lowercase_ : List[str] ) -> List[Any]: if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) UpperCAmelCase : List[str] = src_lang UpperCAmelCase : Union[str, Any] = self(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , **__lowerCAmelCase ) UpperCAmelCase : Any = self.get_lang_id(__lowerCAmelCase 
) UpperCAmelCase : Union[str, Any] = tgt_lang_id return inputs def UpperCAmelCase_ ( self : List[Any] ) -> Any: self.set_src_lang_special_tokens(self.src_lang ) def UpperCAmelCase_ ( self : Optional[int] ) -> Union[str, Any]: self.set_tgt_lang_special_tokens(self.tgt_lang ) def UpperCAmelCase_ ( self : Any , lowercase_ : Any ) -> List[str]: UpperCAmelCase : Any = self.get_lang_token(__lowerCAmelCase ) UpperCAmelCase : str = self.lang_token_to_id[lang_token] UpperCAmelCase : Union[str, Any] = [self.cur_lang_id] UpperCAmelCase : Any = [self.eos_token_id] def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Union[str, Any] ) -> Tuple: UpperCAmelCase : Union[str, Any] = self.get_lang_token(__lowerCAmelCase ) UpperCAmelCase : Tuple = self.lang_token_to_id[lang_token] UpperCAmelCase : Any = [self.cur_lang_id] UpperCAmelCase : Tuple = [self.eos_token_id] def UpperCAmelCase_ ( self : str , lowercase_ : str ) -> Any: return self.lang_code_to_token[lang] def UpperCAmelCase_ ( self : Tuple , lowercase_ : List[str] ) -> List[str]: UpperCAmelCase : str = self.get_lang_token(__lowerCAmelCase ) return self.lang_token_to_id[lang_token] def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ): UpperCAmelCase : str = sentencepiece.SentencePieceProcessor(**lowerCAmelCase__ ) spm.Load(str(lowerCAmelCase__ ) ) return spm def UpperCamelCase( UpperCAmelCase_ ): with open(lowerCAmelCase__ , 'r' ) as f: return json.load(lowerCAmelCase__ ) def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ): with open(lowerCAmelCase__ , 'w' ) as f: json.dump(lowerCAmelCase__ , lowerCAmelCase__ , indent=2 )
151
"""simple docstring""" import argparse from pathlib import Path from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration def UpperCAmelCase__ ( lowerCAmelCase__ :Any , lowerCAmelCase__ :str , lowerCAmelCase__ :str , lowerCAmelCase__ :Path , lowerCAmelCase__ :str = None , lowerCAmelCase__ :str = None , lowerCAmelCase__ :str = None , ) -> Optional[int]: '''simple docstring''' if config_name_or_path is None: lowercase = """facebook/rag-token-base""" if model_type == """rag_token""" else """facebook/rag-sequence-base""" if generator_tokenizer_name_or_path is None: lowercase = generator_name_or_path if question_encoder_tokenizer_name_or_path is None: lowercase = question_encoder_name_or_path lowercase = RagTokenForGeneration if model_type == """rag_token""" else RagSequenceForGeneration # Save model. lowercase = RagConfig.from_pretrained(lowerCAmelCase__ ) lowercase = AutoConfig.from_pretrained(lowerCAmelCase__ ) lowercase = AutoConfig.from_pretrained(lowerCAmelCase__ ) lowercase = gen_config lowercase = question_encoder_config lowercase = model_class.from_pretrained_question_encoder_generator( lowerCAmelCase__ , lowerCAmelCase__ , config=lowerCAmelCase__ ) rag_model.save_pretrained(lowerCAmelCase__ ) # Sanity check. model_class.from_pretrained(lowerCAmelCase__ ) # Save tokenizers. 
lowercase = AutoTokenizer.from_pretrained(lowerCAmelCase__ ) gen_tokenizer.save_pretrained(dest_dir / """generator_tokenizer/""" ) lowercase = AutoTokenizer.from_pretrained(lowerCAmelCase__ ) question_encoder_tokenizer.save_pretrained(dest_dir / """question_encoder_tokenizer/""" ) if __name__ == "__main__": __lowerCAmelCase : int =argparse.ArgumentParser() parser.add_argument( """--model_type""", choices=["""rag_sequence""", """rag_token"""], required=True, type=str, help="""RAG model type: rag_sequence, rag_token""", ) parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""") parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""") parser.add_argument( """--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier""" ) parser.add_argument( """--generator_tokenizer_name_or_path""", type=str, help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""", ) parser.add_argument( """--question_encoder_tokenizer_name_or_path""", type=str, help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""", ) parser.add_argument( """--config_name_or_path""", type=str, help=( """Identifier of the model config to use, if not provided, resolves to a base config for a given""" """ ``model_type``""" ), ) __lowerCAmelCase : List[str] =parser.parse_args() __lowerCAmelCase : Dict =Path(args.dest) dest_dir.mkdir(exist_ok=True) consolidate( args.model_type, args.generator_name_or_path, args.question_encoder_name_or_path, dest_dir, args.config_name_or_path, args.generator_tokenizer_name_or_path, args.question_encoder_tokenizer_name_or_path, )
197
0
'''simple docstring''' from __future__ import annotations import pandas as pd def _lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any] ) -> list[int]: """simple docstring""" _SCREAMING_SNAKE_CASE =[0] * no_of_processes _SCREAMING_SNAKE_CASE =[0] * no_of_processes # Copy the burst time into remaining_time[] for i in range(a_ ): _SCREAMING_SNAKE_CASE =burst_time[i] _SCREAMING_SNAKE_CASE =0 _SCREAMING_SNAKE_CASE =0 _SCREAMING_SNAKE_CASE =9_99_99_99_99 _SCREAMING_SNAKE_CASE =0 _SCREAMING_SNAKE_CASE =False # Process until all processes are completed while complete != no_of_processes: for j in range(a_ ): if arrival_time[j] <= increment_time and remaining_time[j] > 0: if remaining_time[j] < minm: _SCREAMING_SNAKE_CASE =remaining_time[j] _SCREAMING_SNAKE_CASE =j _SCREAMING_SNAKE_CASE =True if not check: increment_time += 1 continue remaining_time[short] -= 1 _SCREAMING_SNAKE_CASE =remaining_time[short] if minm == 0: _SCREAMING_SNAKE_CASE =9_99_99_99_99 if remaining_time[short] == 0: complete += 1 _SCREAMING_SNAKE_CASE =False # Find finish time of current process _SCREAMING_SNAKE_CASE =increment_time + 1 # Calculate waiting time _SCREAMING_SNAKE_CASE =finish_time - arrival_time[short] _SCREAMING_SNAKE_CASE =finar - burst_time[short] if waiting_time[short] < 0: _SCREAMING_SNAKE_CASE =0 # Increment time increment_time += 1 return waiting_time def _lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any ) -> list[int]: """simple docstring""" _SCREAMING_SNAKE_CASE =[0] * no_of_processes for i in range(a_ ): _SCREAMING_SNAKE_CASE =burst_time[i] + waiting_time[i] return turn_around_time def _lowerCAmelCase ( _UpperCamelCase : Dict , _UpperCamelCase : str , _UpperCamelCase : Any ) -> None: """simple docstring""" _SCREAMING_SNAKE_CASE =0 _SCREAMING_SNAKE_CASE =0 for i in range(a_ ): _SCREAMING_SNAKE_CASE =total_waiting_time + waiting_time[i] _SCREAMING_SNAKE_CASE =total_turn_around_time + 
turn_around_time[i] print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}" ) print('Average turn around time =' , total_turn_around_time / no_of_processes ) if __name__ == "__main__": print("Enter how many process you want to analyze") lowerCamelCase : Any = int(input()) lowerCamelCase : Tuple = [0] * no_of_processes lowerCamelCase : str = [0] * no_of_processes lowerCamelCase : List[str] = list(range(1, no_of_processes + 1)) for i in range(no_of_processes): print("Enter the arrival time and burst time for process:--" + str(i + 1)) lowerCamelCase , lowerCamelCase : Optional[Any] = map(int, input().split()) lowerCamelCase : Optional[Any] = calculate_waitingtime(arrival_time, burst_time, no_of_processes) lowerCamelCase : Union[str, Any] = burst_time lowerCamelCase : Tuple = no_of_processes lowerCamelCase : str = waiting_time lowerCamelCase : Optional[Any] = calculate_turnaroundtime(bt, n, wt) calculate_average_times(waiting_time, turn_around_time, no_of_processes) lowerCamelCase : Optional[int] = pd.DataFrame( list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)), columns=[ "Process", "BurstTime", "ArrivalTime", "WaitingTime", "TurnAroundTime", ], ) # Printing the dataFrame pd.set_option("display.max_rows", fcfs.shape[0] + 1) print(fcfs)
364
'''simple docstring''' from ..utils import DummyObject, requires_backends class A__ ( metaclass=A__ ): A__ = ['note_seq'] def __init__( self : List[str] , *_a : Any , **_a : Dict ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ['note_seq'] ) @classmethod def A ( cls : Any , *_a : str , **_a : List[Any] ) -> List[str]: '''simple docstring''' requires_backends(cls , ['note_seq'] ) @classmethod def A ( cls : int , *_a : Optional[Any] , **_a : Optional[int] ) -> Tuple: '''simple docstring''' requires_backends(cls , ['note_seq'] )
114
0
import os def lowercase_ ( ): """simple docstring""" lowerCamelCase__ : Union[str, Any] = os.path.join(os.path.dirname(_a ) , "num.txt" ) with open(_a ) as file_hand: return str(sum(int(_a ) for line in file_hand ) )[:10] if __name__ == "__main__": print(solution())
184
'''simple docstring''' import math from typing import Optional import numpy as np from ...configuration_utils import PretrainedConfig from ...utils import logging A =logging.get_logger(__name__) A ={ 'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json', 'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json', } class _a ( __a ): __a : Union[str, Any] = """encodec""" def __init__( self : Tuple , lowercase : List[str]=[1.5, 3.0, 6.0, 12.0, 24.0] , lowercase : Any=24_000 , lowercase : str=1 , lowercase : Optional[int]=False , lowercase : Optional[Any]=None , lowercase : str=None , lowercase : Tuple=128 , lowercase : Union[str, Any]=32 , lowercase : Union[str, Any]=1 , lowercase : Optional[Any]=[8, 5, 4, 2] , lowercase : Any="weight_norm" , lowercase : Tuple=7 , lowercase : int=7 , lowercase : Dict=3 , lowercase : List[Any]=2 , lowercase : str=True , lowercase : List[str]="reflect" , lowercase : List[Any]=2 , lowercase : Optional[Any]=2 , lowercase : int=1.0 , lowercase : Dict=1_024 , lowercase : str=None , lowercase : Union[str, Any]=True , **lowercase : Optional[int] , ): '''simple docstring''' UpperCAmelCase = target_bandwidths UpperCAmelCase = sampling_rate UpperCAmelCase = audio_channels UpperCAmelCase = normalize UpperCAmelCase = chunk_length_s UpperCAmelCase = overlap UpperCAmelCase = hidden_size UpperCAmelCase = num_filters UpperCAmelCase = num_residual_layers UpperCAmelCase = upsampling_ratios UpperCAmelCase = norm_type UpperCAmelCase = kernel_size UpperCAmelCase = last_kernel_size UpperCAmelCase = residual_kernel_size UpperCAmelCase = dilation_growth_rate UpperCAmelCase = use_causal_conv UpperCAmelCase = pad_mode UpperCAmelCase = compress UpperCAmelCase = num_lstm_layers UpperCAmelCase = trim_right_ratio UpperCAmelCase = codebook_size UpperCAmelCase = codebook_dim if codebook_dim is not None else hidden_size UpperCAmelCase = use_conv_shortcut if self.norm_type not in 
["weight_norm", "time_group_norm"]: raise ValueError( f"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}" ) super().__init__(**lowercase ) @property def A ( self : Dict ): '''simple docstring''' if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def A ( self : Union[str, Any] ): '''simple docstring''' if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length ) ) @property def A ( self : Any ): '''simple docstring''' UpperCAmelCase = np.prod(self.upsampling_ratios ) return math.ceil(self.sampling_rate / hop_length ) @property def A ( self : Optional[int] ): '''simple docstring''' return int(1_000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
34
0
from __future__ import annotations

from random import random
from typing import Generic, TypeVar

# Type variables are used as Generic[KT, VT] below, so they must carry these names.
KT = TypeVar("KT")
VT = TypeVar("VT")


class Node(Generic[KT, VT]):
    """A skip-list node holding a key/value pair and per-level forward links."""

    def __init__(self, key: KT | str = "root", value: VT | None = None):
        # BUG FIX: the mangled source assigned to throwaway locals instead of
        # `self.key` / `self.value` / `self.forward` (names grounded by reads
        # in __repr__, level, and SkipList below).
        self.key = key
        self.value = value
        # forward[i] is the next node on level i; index 0 is the bottom level.
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        """Number of levels this node participates in."""
        return len(self.forward)


class SkipList(Generic[KT, VT]):
    """Probabilistic sorted map with expected O(log n) search/insert/delete."""

    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p  # probability of promoting a node one level up
        self.max_level = max_level

    def __str__(self) -> str:
        items = list(self)

        if len(items) == 0:
            return f"SkipList(level={self.level})"

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(node.forward))
        lines.append(" " * label_size + "| " * len(node.forward))

        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(node.forward))
            forwards = node.forward

        lines.append("None".ljust(label_size) + "* " * len(node.forward))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        # Walk the bottom level, yielding keys in sorted order.
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Draw a geometric level in [1, max_level] with promotion probability p."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        """Return (node-with-key-or-None, per-level predecessors of that position)."""
        # Each leftmost node (relative to the searched node) may need updating.
        update_vector = []
        node = self.head

        for i in reversed(range(self.level)):
            # i < node.level - when the node's level is lesser than `i`, drop `i`.
            # node.forward[i].key < key - jumping to a node with a key >= the
            # searched key would skip over it.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            update_vector.append(node)

        update_vector.reverse()  # we appended in reverse level order

        # len(node.forward) != 0 - no further references means key is absent.
        # node.forward[0].key == key - next node's key equals the searched key
        # exactly when the key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT):
        """Remove `key` from the list if present (no-op otherwise)."""
        node, update_vector = self._locate_node(key)

        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to the removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        """Insert `key` -> `value`, overwriting the value if the key exists."""
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After a level increase, the head must back additional levels.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key, value)

            for i, update_node in enumerate(update_vector[:level]):
                # Re-route per-level references to pass through the new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])

                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: VT):
        """Return the value stored under `key`, or None if absent."""
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None


# NOTE(review): the mangled source defined every function below under the same
# name, so none were reachable; names restored from the call sites in pytests().
def test_insert():
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search():
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None


def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests():
    # NOTE(review): runner name reconstructed — it is never called in this file.
    for _ in range(100):
        # Repeat 100 times due to the probabilistic nature of the skip list:
        # random values == random bugs.
        test_insert()
        test_insert_overrides_existing_value()

        test_searching_empty_list_returns_none()
        test_search()

        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()

        test_iter_always_yields_sorted_values()


def main():
    """Small demonstration: build a list, delete a key, print the structure."""
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")

    skip_list.delete(4)

    print(skip_list)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
35
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Return True if no already-colored neighbour of the vertex uses `color`.

    `neighbours` is one adjacency-matrix row (1 marks an edge); `colored_vertices`
    holds the color of each vertex, -1 meaning uncolored.
    """
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(
    graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int
) -> bool:
    """Backtracking step: try to color vertices from `index` onward.

    Mutates `colored_vertices` in place; returns True once every vertex is
    colored consistently, False if no assignment exists.
    """
    # Base case: all vertices colored.
    if index == len(graph):
        return True

    # Recursive step: try every color for the current vertex.
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            colored_vertices[index] = i
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack.
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Graph m-coloring: return a valid coloring (list of color indices per
    vertex) using at most `max_colors` colors, or [] if none exists.

    NOTE(review): the mangled source defined all three functions under one name
    while calling `valid_coloring`/`util_color`, which could never resolve; the
    callee names are restored from those call sites, this entry point's name is
    reconstructed.
    """
    colored_vertices = [-1] * len(graph)

    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices

    return []
35
1
"""Invisible-watermark helper for generated images (restored from mangled source)."""
import numpy as np
import torch
from imwatermark import WatermarkEncoder


# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b101100111110110010010000011110111011000110011110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class StableDiffusionXLWatermarker:
    """Embeds an invisible DWT-DCT bit watermark into image tensors.

    NOTE(review): class and method names reconstructed — the source mangled them
    to `A`/`A__`; the attribute names (`watermark`, `encoder`) are grounded by
    the reads in __init__ and apply_watermark.
    """

    def __init__(self):
        # BUG FIX: the mangled source bound these to throwaway locals instead of
        # instance attributes that `self.encoder.set_watermark(...)` and
        # apply_watermark rely on.
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()

        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        """Watermark a batch of images in [-1, 1], NCHW; returns the same range.

        Images narrower than 256 px cannot be encoded and are returned unchanged.
        """
        if images.shape[-1] < 256:
            return images

        # [-1, 1] float NCHW -> [0, 255] float NHWC numpy for the encoder.
        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        # Back to torch NCHW and to the [-1, 1] range, clamped against encoder drift.
        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
164
"""ConvNeXt V2 model configuration (restored from identifier-mangled source)."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}


class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a ConvNeXt V2 backbone.

    Defaults reproduce the tiny 1k-224 variant. `hidden_sizes` and `depths`
    fall back to the tiny architecture when not given.

    NOTE(review): the mangled source used duplicate parameter names and lost the
    `self.` assignments; names restored from the assignment right-hand sides and
    the later `self.depths` / `self.stage_names` reads. The base classes are the
    two imported at the top of the file.
    """

    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        # NOTE(review): underscore attribute names follow the BackboneConfigMixin
        # convention — confirm against the mixin's accessors.
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
164
1
"""simple docstring""" import pytest from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs @pytest.mark.parametrize( "kwargs, expected" , [ ({"num_shards": 0, "max_num_jobs": 1}, []), ({"num_shards": 10, "max_num_jobs": 1}, [range(10 )]), ({"num_shards": 10, "max_num_jobs": 10}, [range(snake_case , i + 1 ) for i in range(10 )]), ({"num_shards": 1, "max_num_jobs": 10}, [range(1 )]), ({"num_shards": 10, "max_num_jobs": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]), ({"num_shards": 3, "max_num_jobs": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]), ] , ) def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] , snake_case : Union[str, Any] )-> Any: '''simple docstring''' UpperCAmelCase__ : List[str] = _distribute_shards(**snake_case ) assert out == expected @pytest.mark.parametrize( "gen_kwargs, max_num_jobs, expected" , [ ({"foo": 0}, 10, [{"foo": 0}]), ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]), ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]), ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]), ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]), ] , ) def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , snake_case : Dict , snake_case : int )-> Optional[int]: '''simple docstring''' UpperCAmelCase__ : List[Any] = _split_gen_kwargs(snake_case , snake_case ) assert out == expected @pytest.mark.parametrize( "gen_kwargs, expected" , [ ({"foo": 0}, 1), ({"shards": [0]}, 1), ({"shards": [0, 1, 2, 3]}, 4), ({"shards": [0, 1, 2, 3], "foo": 0}, 4), ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4), ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError), ] , ) def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : List[Any] )-> Tuple: '''simple docstring''' if expected is RuntimeError: with pytest.raises(snake_case ): _number_of_shards_in_gen_kwargs(snake_case ) else: UpperCAmelCase__ : Any = 
_number_of_shards_in_gen_kwargs(snake_case ) assert out == expected
298
"""simple docstring""" import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class lowerCAmelCase__ : def __init__( self : Any , snake_case__ : Union[str, Any] , snake_case__ : str=1_0_0 , snake_case__ : str=1_3 , snake_case__ : Optional[int]=3_0 , snake_case__ : List[Any]=2 , snake_case__ : Any=3 , snake_case__ : Union[str, Any]=True , snake_case__ : List[Any]=True , snake_case__ : Any=3_2 , snake_case__ : List[str]=4 , snake_case__ : Any=4 , snake_case__ : Dict=3_7 , snake_case__ : str="gelu" , snake_case__ : Union[str, Any]=0.1 , snake_case__ : int=0.1 , snake_case__ : List[Any]=1_0 , snake_case__ : Any=0.02 , snake_case__ : List[str]=3 , snake_case__ : Tuple=None , snake_case__ : Tuple=[0, 1, 2, 3] , ): '''simple docstring''' UpperCAmelCase__ : int = parent UpperCAmelCase__ : List[str] = 1_0_0 UpperCAmelCase__ : List[Any] = batch_size UpperCAmelCase__ : int = image_size UpperCAmelCase__ : List[Any] = patch_size UpperCAmelCase__ : List[Any] = num_channels UpperCAmelCase__ : Any = is_training UpperCAmelCase__ : str = use_labels UpperCAmelCase__ : Any = 
hidden_size UpperCAmelCase__ : Dict = num_hidden_layers UpperCAmelCase__ : int = num_attention_heads UpperCAmelCase__ : Tuple = intermediate_size UpperCAmelCase__ : Any = hidden_act UpperCAmelCase__ : Optional[int] = hidden_dropout_prob UpperCAmelCase__ : str = attention_probs_dropout_prob UpperCAmelCase__ : Optional[int] = type_sequence_label_size UpperCAmelCase__ : Any = initializer_range UpperCAmelCase__ : Any = scope UpperCAmelCase__ : Optional[Any] = out_indices UpperCAmelCase__ : int = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase__ : List[Any] = (image_size // patch_size) ** 2 UpperCAmelCase__ : Optional[int] = num_patches + 1 def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase__ : str = None UpperCAmelCase__ : Optional[int] = None if self.use_labels: UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) UpperCAmelCase__ : Tuple = self.get_config() return config, pixel_values, labels, pixel_labels def __a ( self : int ): '''simple docstring''' return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , out_indices=self.out_indices , ) def __a ( self : int , snake_case__ : str , snake_case__ : str , snake_case__ : Dict , snake_case__ : List[str] ): '''simple docstring''' 
UpperCAmelCase__ : Dict = BeitModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Dict = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Any ): '''simple docstring''' UpperCAmelCase__ : int = BeitForMaskedImageModeling(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : List[Any] = model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def __a ( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.type_sequence_label_size UpperCAmelCase__ : Union[str, Any] = BeitForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase__ : Any = 1 UpperCAmelCase__ : List[Any] = BeitForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase__ : Optional[Any] = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __a ( self : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Any , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.num_labels UpperCAmelCase__ : int = BeitForSemanticSegmentation(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : int = model(snake_case__ ) self.parent.assertEqual( 
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) UpperCAmelCase__ : Dict = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs() UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = config_and_inputs UpperCAmelCase__ : Any = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ =( { '''feature-extraction''': BeitModel, '''image-classification''': BeitForImageClassification, '''image-segmentation''': BeitForSemanticSegmentation, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = BeitModelTester(self ) UpperCAmelCase__ : List[str] = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=3_7 ) def __a ( self : List[str] ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="BEiT does not use inputs_embeds" ) def __a ( self : List[Any] ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def __a ( self : List[str] ): '''simple docstring''' pass def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Dict = 
model_class(snake_case__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCAmelCase__ : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : int = model_class(snake_case__ ) UpperCAmelCase__ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ : str = [*signature.parameters.keys()] UpperCAmelCase__ : int = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case__ ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case__ ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ ) def __a ( self : List[Any] ): '''simple docstring''' if not self.model_tester.is_training: return UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Optional[int] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]: continue UpperCAmelCase__ : Optional[Any] = 
model_class(snake_case__ ) model.to(snake_case__ ) model.train() UpperCAmelCase__ : Optional[int] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) UpperCAmelCase__ : Tuple = model(**snake_case__ ).loss loss.backward() def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return UpperCAmelCase__ : Optional[int] = False UpperCAmelCase__ : List[str] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue UpperCAmelCase__ : List[Any] = model_class(snake_case__ ) model.gradient_checkpointing_enable() model.to(snake_case__ ) model.train() UpperCAmelCase__ : Dict = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) UpperCAmelCase__ : Optional[Any] = model(**snake_case__ ).loss loss.backward() def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Union[str, Any] = _config_zero_init(snake_case__ ) for model_class in self.all_model_classes: UpperCAmelCase__ : int = model_class(config=snake_case__ ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) @slow def __a ( self : Any ): '''simple docstring''' for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : Optional[Any] = BeitModel.from_pretrained(snake_case__ ) 
self.assertIsNotNone(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( )-> Optional[Any]: '''simple docstring''' UpperCAmelCase__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCAmelCase__ ( unittest.TestCase ): @cached_property def __a ( self : Union[str, Any] ): '''simple docstring''' return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None @slow def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(snake_case__ ) UpperCAmelCase__ : int = self.default_image_processor UpperCAmelCase__ : List[Any] = prepare_img() UpperCAmelCase__ : Dict = image_processor(images=snake_case__ , return_tensors="pt" ).pixel_values.to(snake_case__ ) # prepare bool_masked_pos UpperCAmelCase__ : Union[str, Any] = torch.ones((1, 1_9_6) , dtype=torch.bool ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Optional[int] = model(pixel_values=snake_case__ , bool_masked_pos=snake_case__ ) UpperCAmelCase__ : str = outputs.logits # verify the logits UpperCAmelCase__ : int = torch.Size((1, 1_9_6, 8_1_9_2) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : Any = torch.tensor( [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(snake_case__ ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , snake_case__ , atol=1e-2 ) ) @slow def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(snake_case__ ) UpperCAmelCase__ : Tuple = self.default_image_processor UpperCAmelCase__ : Dict = prepare_img() UpperCAmelCase__ : Tuple = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): 
UpperCAmelCase__ : Union[str, Any] = model(**snake_case__ ) UpperCAmelCase__ : Any = outputs.logits # verify the logits UpperCAmelCase__ : Optional[Any] = torch.Size((1, 1_0_0_0) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : Optional[Any] = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(snake_case__ ) self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) ) UpperCAmelCase__ : List[str] = 2_8_1 self.assertEqual(logits.argmax(-1 ).item() , snake_case__ ) @slow def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : int = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to( snake_case__ ) UpperCAmelCase__ : Tuple = self.default_image_processor UpperCAmelCase__ : Any = prepare_img() UpperCAmelCase__ : Union[str, Any] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : List[Any] = model(**snake_case__ ) UpperCAmelCase__ : int = outputs.logits # verify the logits UpperCAmelCase__ : int = torch.Size((1, 2_1_8_4_1) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : int = torch.tensor([1.6881, -0.2787, 0.5901] ).to(snake_case__ ) self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) ) UpperCAmelCase__ : Any = 2_3_9_6 self.assertEqual(logits.argmax(-1 ).item() , snake_case__ ) @slow def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) UpperCAmelCase__ : List[Any] = model.to(snake_case__ ) UpperCAmelCase__ : int = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ ) UpperCAmelCase__ : Any = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) UpperCAmelCase__ : List[Any] = Image.open(ds[0]["file"] ) UpperCAmelCase__ : str = image_processor(images=snake_case__ , return_tensors="pt" 
).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : List[str] = model(**snake_case__ ) UpperCAmelCase__ : Dict = outputs.logits # verify the logits UpperCAmelCase__ : Any = torch.Size((1, 1_5_0, 1_6_0, 1_6_0) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : List[str] = version.parse(PIL.__version__ ) < version.parse("9.0.0" ) if is_pillow_less_than_a: UpperCAmelCase__ : Optional[Any] = torch.tensor( [ [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]], [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]], [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]], ] , device=snake_case__ , ) else: UpperCAmelCase__ : int = torch.tensor( [ [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]], [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]], [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]], ] , device=snake_case__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1e-4 ) ) @slow def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : str = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) UpperCAmelCase__ : Any = model.to(snake_case__ ) UpperCAmelCase__ : Dict = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ ) UpperCAmelCase__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) UpperCAmelCase__ : Optional[int] = Image.open(ds[0]["file"] ) UpperCAmelCase__ : Optional[int] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Optional[int] = model(**snake_case__ ) UpperCAmelCase__ : int = outputs.logits.detach().cpu() UpperCAmelCase__ : str = image_processor.post_process_semantic_segmentation(outputs=snake_case__ 
, target_sizes=[(5_0_0, 3_0_0)] ) UpperCAmelCase__ : List[Any] = torch.Size((5_0_0, 3_0_0) ) self.assertEqual(segmentation[0].shape , snake_case__ ) UpperCAmelCase__ : Any = image_processor.post_process_semantic_segmentation(outputs=snake_case__ ) UpperCAmelCase__ : int = torch.Size((1_6_0, 1_6_0) ) self.assertEqual(segmentation[0].shape , snake_case__ )
298
1
"""Fast (Rust-backed) tokenizer for MVP — byte-level BPE, mirroring BartTokenizerFast."""
import json
from typing import List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer


logger = logging.get_logger(__name__)

# Restored distinct constant names: the mangled file bound all three dicts to one
# shadowed name, leaving the class attributes below referencing undefined names.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
    },
    "added_tokens.json": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
    },
    "merges_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "RUCAIBox/mvp": 1024,
}


class A_(PreTrainedTokenizerFast):
    """MVP fast tokenizer (byte-level BPE).

    Method names restored to the base-class override API (`_batch_encode_plus`,
    `save_vocabulary`, ...): the mangled version named every method
    `UpperCAmelCase_`, so none of these overrides was ever invoked, and the
    `@mask_token.setter` decorator referenced an undefined `mask_token` name
    (NameError at class-creation time).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        # Rebuild the byte-level pre-tokenizer if its serialized add_prefix_space
        # flag disagrees with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """Return the mask token as a string, or None (with a log) if unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # lstrip=True / rstrip=False per the byte-level convention: the mask token
        # eats the space before it — TODO confirm against the slow tokenizer.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Serialize the underlying BPE model files into `save_directory`."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_pair=None):
        """<s> A </s> for a single sequence; <s> A </s></s> B </s> for a pair."""
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a_pair is None:
            return output
        return output + [self.eos_token_id] + token_ids_a_pair + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None
    ) -> List[int]:
        """MVP (like BART) does not use token type ids: all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_pair + sep) * [0]
151
"""Split a byte count into contiguous 1-based partition ranges."""
from __future__ import annotations


def UpperCamelCase(number_of_bytes, partitions):
    """Return `partitions` contiguous byte ranges covering 1..number_of_bytes.

    Fix: the mangled signature declared two parameters with the same name
    (a SyntaxError) while the body referenced `number_of_bytes`/`partitions`
    and variables (`bytes_per_partition`, `allocation_list`, `start_bytes`,
    `end_bytes`) whose binding sites had been destroyed. Parameter names are
    restored from those in-body references.

    Raises:
        ValueError: if partitions is not positive, or exceeds number_of_bytes.
    """
    if partitions <= 0:
        raise ValueError('partitions must be a positive number!')
    if partitions > number_of_bytes:
        raise ValueError('partitions can not > number_of_bytes!')
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        # The last partition absorbs any remainder.
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"""{start_bytes}-{end_bytes}""")
    return allocation_list


if __name__ == "__main__":
    import doctest

    doctest.testmod()
151
1
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: lowerCamelCase_ : Optional[int] = None lowerCamelCase_ : Union[str, Any] = logging.get_logger(__name__) lowerCamelCase_ : str = {"""vocab_file""": """sentencepiece.model""", """tokenizer_file""": """tokenizer.json"""} lowerCamelCase_ : int = { """vocab_file""": { """google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""", }, """tokenizer_file""": { """google/rembert""": """https://huggingface.co/google/rembert/resolve/main/tokenizer.json""", }, } lowerCamelCase_ : Optional[Any] = { """google/rembert""": 2_5_6, } lowerCamelCase_ : Optional[Any] = """▁""" class __A ( _SCREAMING_SNAKE_CASE ): """simple docstring""" __lowerCAmelCase = VOCAB_FILES_NAMES __lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase = RemBertTokenizer def __init__( self , __A=None , __A=None , __A=True , __A=True , __A=False , __A="[CLS]" , __A="[SEP]" , __A="<unk>" , __A="[SEP]" , __A="<pad>" , __A="[CLS]" , __A="[MASK]" , **__A , ) -> Dict: # Mask token behave like a normal word, i.e. 
include the space before it a =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token super().__init__( __A , tokenizer_file=__A , do_lower_case=__A , remove_space=__A , keep_accents=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , **__A , ) a =do_lower_case a =remove_space a =keep_accents a =vocab_file a =False if not self.vocab_file else True def SCREAMING_SNAKE_CASE ( self , __A , __A = None ) -> List[int]: a =[self.sep_token_id] a =[self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def SCREAMING_SNAKE_CASE ( self , __A , __A = None , __A = False ) -> List[int]: if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(__A )) + [1] + ([0] * len(__A )) + [1] return [1] + ([0] * len(__A )) + [1] def SCREAMING_SNAKE_CASE ( self , __A , __A = None ) -> List[int]: a =[self.sep_token_id] a =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def SCREAMING_SNAKE_CASE ( self , __A , __A = None ) -> Tuple[str]: if not os.path.isdir(__A ): logger.error('''Vocabulary path ({}) should be a directory'''.format(__A ) ) return a =os.path.join( __A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ): copyfile(self.vocab_file , __A ) return (out_vocab_file,)
215
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING lowerCamelCase_ : int = logging.get_logger(__name__) class __A ( _SCREAMING_SNAKE_CASE ): """simple docstring""" __lowerCAmelCase = "upernet" def __init__( self , __A=None , __A=512 , __A=0.02 , __A=[1, 2, 3, 6] , __A=True , __A=0.4 , __A=384 , __A=256 , __A=1 , __A=False , __A=255 , **__A , ) -> Tuple: super().__init__(**__A ) if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) a =CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] ) elif isinstance(__A , __A ): a =backbone_config.get('''model_type''' ) a =CONFIG_MAPPING[backbone_model_type] a =config_class.from_dict(__A ) a =backbone_config a =hidden_size a =initializer_range a =pool_scales a =use_auxiliary_head a =auxiliary_loss_weight a =auxiliary_in_channels a =auxiliary_channels a =auxiliary_num_convs a =auxiliary_concat_input a =loss_ignore_index def SCREAMING_SNAKE_CASE ( self ) -> Dict: a =copy.deepcopy(self.__dict__ ) a =self.backbone_config.to_dict() a =self.__class__.model_type return output
215
1
"""Priority-queue implementations backed by plain Python lists.

Fix: the mangled version named all four classes `__UpperCAmelCase` (each
definition shadowing the previous one) while the code itself references
`OverFlowError`, `UnderFlowError`, `FixedPriorityQueue`,
`ElementPriorityQueue`, `fixed_priority_queue` and `element_priority_queue`
— every such reference raised NameError. Names and `self.*` bindings are
restored from those in-body references.
"""


class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """Three fixed priority levels (0 highest), each capped at 100 items."""

    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority, data):
        """Append `data` to the queue of the given priority (0, 1 or 2).

        NOTE: raises the *builtin* OverflowError on a full queue (while
        ElementPriorityQueue raises the custom OverFlowError) — kept as-is
        because callers may catch the builtin type.
        """
        try:
            if len(self.queues[priority]) >= 1_00:
                raise OverflowError('Maximum queue size is 100')
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError('Valid priorities are 0, 1, and 2')

    def dequeue(self):
        """Pop the oldest item from the highest non-empty priority level."""
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError('All queues are empty')

    def __str__(self):
        return "\n".join(f'Priority {i}: {q}' for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """Single queue where the smallest element has the highest priority."""

    def __init__(self):
        self.queue = []

    def enqueue(self, data):
        if len(self.queue) == 1_00:
            raise OverFlowError('Maximum queue size is 100')
        self.queue.append(data)

    def dequeue(self):
        """Remove and return the minimum element."""
        if not self.queue:
            raise UnderFlowError('The queue is empty')
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self):
        return str(self.queue)


def fixed_priority_queue():
    """Demo of FixedPriorityQueue (prints state and dequeued items)."""
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue():
    """Demo of ElementPriorityQueue (prints state and dequeued items)."""
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
42
"""Project Euler 75: perimeters formed by exactly one right-angle triangle."""
from collections import defaultdict
from math import gcd


def solution(limit: int = 1_500_000) -> int:
    """Count perimeters <= `limit` that are formed by exactly one integer-sided
    right triangle.

    Uses Euclid's formula: for coprime m > n of opposite parity, the primitive
    triple has perimeter 2*m*(m+n); all multiples of that perimeter up to the
    limit get one more triangle each.

    Fix: the mangled version called `range((m % 2) + 1, limit, 2)` and
    `gcd(limit, limit)`, so the gcd test always fired and the function
    returned 0. The correct bounds are `range((m % 2) + 1, euclid_m, 2)` and
    `gcd(euclid_m, euclid_n)`.
    """
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    # Smallest perimeter for a given m is 2*m*(m+1) (with n=1).
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        # n < m, opposite parity, coprime with m -> primitive triple.
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


if __name__ == "__main__":
    print(f'''{solution() = }''')
42
1
"""Nat (Neighborhood Attention Transformer) model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
    # See all Nat models at https://huggingface.co/models?filter=nat
}


class lowerCAmelCase__(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a Nat model.

    Fix: the mangled version discarded every `self.*` assignment into
    throwaway locals, so the config stored nothing; attribute names are
    restored from the constructor parameters and the references that survive
    (`self.stage_names`, the hidden_size comment).
    """

    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],  # NOTE: mutable default kept for config-signature compatibility
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"""stage{idx}""" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
322
"""Bit-trick check for powers of two."""


def __lowerCAmelCase(__lowerCAmelCase):
    """Return True when the argument is a power of two.

    Uses the classic ``n & (n - 1) == 0`` trick: a power of two has exactly
    one set bit, so clearing its lowest set bit yields zero. Note that this
    deliberately reports True for 0 as well (0 & -1 == 0), matching the
    original behavior.

    Raises:
        ValueError: if the argument is negative.
    """
    if __lowerCAmelCase < 0:
        raise ValueError("number must not be negative")
    cleared_lowest_bit = __lowerCAmelCase & (__lowerCAmelCase - 1)
    return cleared_lowest_bit == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
322
1
from ..utils import DummyObject, requires_backends


class a__(metaclass=DummyObject):
    """Placeholder that raises an informative ImportError when `keras_nlp`
    is not installed.

    Fix: the mangled version used an undefined `snake_case__` as the
    metaclass (NameError at class creation) — the intended metaclass is the
    imported `DummyObject` — and named the backend list `_a`, while the dummy
    machinery reads the `_backends` attribute.
    """

    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
92
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __snake_case : Dict = logging.get_logger(__name__) __snake_case : Any = {"""vocab_file""": """sentencepiece.bpe.model"""} __snake_case : Union[str, Any] = { """vocab_file""": { """moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""", """moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""", """moussaKam/barthez-orangesum-title""": ( """https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model""" ), }, } __snake_case : Tuple = { """moussaKam/mbarthez""": 10_24, """moussaKam/barthez""": 10_24, """moussaKam/barthez-orangesum-title""": 10_24, } __snake_case : int = """▁""" class A__(a_ ): """simple docstring""" _A : str = VOCAB_FILES_NAMES _A : int = PRETRAINED_VOCAB_FILES_MAP _A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _A : Tuple = ['''input_ids''', '''attention_mask'''] def __init__( self , _lowercase , _lowercase="<s>" , _lowercase="</s>" , _lowercase="</s>" , _lowercase="<s>" , _lowercase="<unk>" , _lowercase="<pad>" , _lowercase="<mask>" , _lowercase = None , **_lowercase , ) -> None: # Mask token behave like a normal word, i.e. 
include the space before it a_ : Dict = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token a_ : str = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , cls_token=_lowercase , pad_token=_lowercase , mask_token=_lowercase , sp_model_kwargs=self.sp_model_kwargs , **_lowercase , ) a_ : List[Any] = vocab_file a_ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_lowercase ) ) a_ : Optional[Any] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3} a_ : str = len(self.sp_model ) - 1 a_ : Dict = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def UpperCamelCase__ ( self , _lowercase , _lowercase = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] a_ : Optional[Any] = [self.cls_token_id] a_ : Union[str, Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def UpperCamelCase__ ( self , _lowercase , _lowercase = None , _lowercase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase ) if token_ids_a is None: return [1] + ([0] * len(_lowercase )) + [1] return [1] + ([0] * len(_lowercase )) + [1, 1] + ([0] * len(_lowercase )) + [1] def UpperCamelCase__ ( self , _lowercase , _lowercase = None ) -> List[int]: a_ : Optional[Any] = [self.sep_token_id] a_ : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def UpperCamelCase__ ( self ) -> List[Any]: return len(self.sp_model ) def UpperCamelCase__ ( self ) -> List[str]: a_ : List[Any] = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )} 
vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase__ ( self , _lowercase ) -> List[str]: return self.sp_model.encode(_lowercase , out_type=_lowercase ) def UpperCamelCase__ ( self , _lowercase ) -> Any: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] a_ : Optional[int] = self.sp_model.PieceToId(_lowercase ) return spm_id if spm_id else self.unk_token_id def UpperCamelCase__ ( self , _lowercase ) -> Union[str, Any]: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(_lowercase ) def UpperCamelCase__ ( self , _lowercase ) -> Any: a_ : Dict = [] a_ : Union[str, Any] = """""" a_ : Dict = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_lowercase ) + token a_ : Dict = True a_ : int = [] else: current_sub_tokens.append(_lowercase ) a_ : Union[str, Any] = False out_string += self.sp_model.decode(_lowercase ) return out_string.strip() def __getstate__( self ) -> Optional[int]: a_ : Any = self.__dict__.copy() a_ : int = None return state def __setstate__( self , _lowercase ) -> Union[str, Any]: a_ : List[Any] = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): a_ : str = {} a_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase__ ( self , _lowercase , _lowercase = None ) -> Tuple[str]: if not os.path.isdir(_lowercase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return a_ : Dict = os.path.join( _lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _lowercase ) elif not 
os.path.isfile(self.vocab_file ): with open(_lowercase , """wb""" ) as fi: a_ : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(_lowercase ) return (out_vocab_file,)
248
0
"""simple docstring""" import argparse import requests import torch from PIL import Image from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor def __a ( __lowerCamelCase ): UpperCAmelCase_ : Dict = SwinConfig(image_size=192 ) if "base" in model_name: UpperCAmelCase_ : str = 6 UpperCAmelCase_ : List[Any] = 128 UpperCAmelCase_ : int = (2, 2, 18, 2) UpperCAmelCase_ : Optional[Any] = (4, 8, 16, 32) elif "large" in model_name: UpperCAmelCase_ : Any = 12 UpperCAmelCase_ : Tuple = 192 UpperCAmelCase_ : Dict = (2, 2, 18, 2) UpperCAmelCase_ : int = (6, 12, 24, 48) else: raise ValueError("Model not supported, only supports base and large variants" ) UpperCAmelCase_ : Tuple = window_size UpperCAmelCase_ : Optional[int] = embed_dim UpperCAmelCase_ : str = depths UpperCAmelCase_ : Dict = num_heads return config def __a ( __lowerCamelCase ): if "encoder.mask_token" in name: UpperCAmelCase_ : Optional[int] = name.replace("encoder.mask_token", "embeddings.mask_token" ) if "encoder.patch_embed.proj" in name: UpperCAmelCase_ : List[str] = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection" ) if "encoder.patch_embed.norm" in name: UpperCAmelCase_ : Union[str, Any] = name.replace("encoder.patch_embed.norm", "embeddings.norm" ) if "attn.proj" in name: UpperCAmelCase_ : Any = name.replace("attn.proj", "attention.output.dense" ) if "attn" in name: UpperCAmelCase_ : int = name.replace("attn", "attention.self" ) if "norm1" in name: UpperCAmelCase_ : List[Any] = name.replace("norm1", "layernorm_before" ) if "norm2" in name: UpperCAmelCase_ : str = name.replace("norm2", "layernorm_after" ) if "mlp.fc1" in name: UpperCAmelCase_ : List[str] = name.replace("mlp.fc1", "intermediate.dense" ) if "mlp.fc2" in name: UpperCAmelCase_ : Optional[int] = name.replace("mlp.fc2", "output.dense" ) if name == "encoder.norm.weight": UpperCAmelCase_ : List[Any] = "layernorm.weight" if name == "encoder.norm.bias": UpperCAmelCase_ : Optional[int] = 
"layernorm.bias" if "decoder" in name: pass else: UpperCAmelCase_ : str = "swin." + name return name def __a ( __lowerCamelCase, __lowerCamelCase ): for key in orig_state_dict.copy().keys(): UpperCAmelCase_ : List[str] = orig_state_dict.pop(__lowerCamelCase ) if "attn_mask" in key: pass elif "qkv" in key: UpperCAmelCase_ : Optional[Any] = key.split("." ) UpperCAmelCase_ : List[str] = int(key_split[2] ) UpperCAmelCase_ : Dict = int(key_split[4] ) UpperCAmelCase_ : List[Any] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: UpperCAmelCase_ : List[str] = val[:dim, :] UpperCAmelCase_ : List[Any] = val[ dim : dim * 2, : ] UpperCAmelCase_ : Optional[int] = val[-dim:, :] else: UpperCAmelCase_ : Tuple = val[ :dim ] UpperCAmelCase_ : List[str] = val[ dim : dim * 2 ] UpperCAmelCase_ : Any = val[ -dim: ] else: UpperCAmelCase_ : List[str] = val return orig_state_dict def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : List[str] = torch.load(__lowerCamelCase, map_location="cpu" )["model"] UpperCAmelCase_ : List[Any] = get_swin_config(__lowerCamelCase ) UpperCAmelCase_ : Dict = SwinForMaskedImageModeling(__lowerCamelCase ) model.eval() UpperCAmelCase_ : int = convert_state_dict(__lowerCamelCase, __lowerCamelCase ) model.load_state_dict(__lowerCamelCase ) UpperCAmelCase_ : Dict = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase_ : str = ViTImageProcessor(size={"height": 192, "width": 192} ) UpperCAmelCase_ : Dict = Image.open(requests.get(__lowerCamelCase, stream=__lowerCamelCase ).raw ) UpperCAmelCase_ : Any = image_processor(images=__lowerCamelCase, return_tensors="pt" ) with torch.no_grad(): UpperCAmelCase_ : List[Any] = model(**__lowerCamelCase ).logits print(outputs.keys() ) print("Looks ok!" 
) if pytorch_dump_folder_path is not None: print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__lowerCamelCase ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(__lowerCamelCase ) if push_to_hub: print(f"""Pushing model and image processor for {model_name} to hub""" ) model.push_to_hub(f"""microsoft/{model_name}""" ) image_processor.push_to_hub(f"""microsoft/{model_name}""" ) if __name__ == "__main__": _a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='swin-base-simmim-window6-192', type=str, choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'], help='Name of the Swin SimMIM model you\'d like to convert.', ) parser.add_argument( '--checkpoint_path', default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth', type=str, help='Path to the original PyTorch checkpoint (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) _a = parser.parse_args() convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
23
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _a = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ 'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST', 'UniSpeechForCTC', 'UniSpeechForPreTraining', 'UniSpeechForSequenceClassification', 'UniSpeechModel', 'UniSpeechPreTrainedModel', ] if TYPE_CHECKING: from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_unispeech import ( UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel, ) else: import sys _a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
23
1
"""Longformer model configuration and ONNX export configuration.

Fix: both classes in the mangled version were named `_A` (the second
definition shadowing the first) with a mangled, undefined base-class name,
and every `self.*` assignment had been collapsed to a throwaway local.
Names restored from the archive map and surviving references.
"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging

if TYPE_CHECKING:
    from ...onnx.config import PatchingSpec
    from ...tokenization_utils_base import PreTrainedTokenizerBase


logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
    '''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
    '''allenai/longformer-large-4096-finetuned-triviaqa''': (
        '''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
    ),
    '''allenai/longformer-base-4096-extra.pos.embd.only''': (
        '''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
    ),
    '''allenai/longformer-large-4096-extra.pos.embd.only''': (
        '''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
    ),
}


class LongformerConfig(PretrainedConfig):
    """Configuration for a Longformer model."""

    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30_522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3_072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export


class LongformerOnnxConfig(OnnxConfig):
    """ONNX export configuration for Longformer (adds global_attention_mask)."""

    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "Optional[List[PatchingSpec]]" = None):
        super().__init__(config, task, patching_specs)
        # Switch the model into its ONNX-exportable attention implementation.
        self._config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
                ("""global_attention_mask""", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: """batch"""}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        preprocessor: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: "Optional[TensorType]" = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["""input_ids"""])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
254
"""Copy the first N lines of every file in a directory to a sibling directory."""
from pathlib import Path


def lowercase_(src_path: str, dest_path: str, n: int):
    """For each file directly inside `src_path`, write its first `n` lines
    (right-stripped, newline-joined) to a same-named file under `dest_path`.

    The destination directory is created if missing; each processed
    destination path is printed.
    """
    src_dir = Path(src_path)
    dest_dir = Path(dest_path)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new_lines = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_file = dest_dir.joinpath(path.name)
        print(dest_file)
        dest_file.open("""w""").write("""\n""".join(new_lines))


if __name__ == "__main__":
    # Fix: `fire` is a third-party CLI helper; importing it at module top level
    # made merely importing this module for `minify` require fire. Defer it to
    # the script entry point.
    import fire

    fire.Fire(lowercase_)
254
1
from collections import defaultdict


class AssignmentUsingBitmask:
    """Count the ways to hand out distinct tasks so every person gets exactly one.

    Person ``p`` may only perform the tasks listed in ``task_performed[p]``.
    Bitmask dynamic programming: ``dp[mask][task_no]`` caches the number of
    ways to complete the assignment given that the persons whose bits are set
    in ``mask`` are already busy and tasks ``task_no..total_tasks`` remain.
    """

    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table of dimension (2^M) x (N+1); -1 marks "not computed yet".
        self.dp = [
            [-1 for _ in range(total + 1)] for _ in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask has one bit per person; reaching it means everyone is busy.
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        """Return the number of valid completions from state (mask, task_no)."""
        # All persons have been assigned a task: one complete assignment.
        if mask == self.final_mask:
            return 1
        # No tasks left while somebody is still free: dead end.
        if task_no > self.total_tasks:
            return 0
        # Already memoized?
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Ways when the current task is not used by anyone.
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # Otherwise try giving this task to each person able to perform it.
        if task_no in self.task:
            for p in self.task[task_no]:
                if mask & (1 << p):  # person p is already busy
                    continue
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # Save the value before returning.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        """Build the task -> persons index, then count all assignments."""
        for person, tasks in enumerate(task_performed):
            for t in tasks:
                self.task[t].append(person)
        # Tasks are numbered from 1; initially nobody is busy.
        return self.count_ways_until(0, 1)


if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)
    # The list of tasks that can be done by each of the M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
134
def combination_util(arr, n, r, index, data, i):
    """Recursively print all size-``r`` combinations drawn from ``arr[i:]``.

    Args:
        arr: Input array.
        n: Size of ``arr``.
        r: Combination size.
        index: Next free position in ``data``.
        data: Scratch buffer holding the combination built so far.
        i: Index of the next candidate element in ``arr``.
    """
    # A full combination has been built: print it and backtrack.
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # Current element is included: put next at next location.
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # Current element is excluded: replace it with the next one
    # (note that i+1 is passed but index is not changed).
    combination_util(arr, n, r, index, data, i + 1)


def print_combination(arr, n, r):
    """Print every combination of size ``r`` from ``arr`` of size ``n``.

    Thin wrapper around :func:`combination_util` that allocates the
    temporary buffer.
    """
    data = [0] * r  # temporary array to store one combination at a time
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
134
1
import itertools
from dataclasses import dataclass
from typing import Optional

import pandas as pd
import pyarrow as pa

import datasets
from datasets.table import table_cast


@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for the pickled-pandas packaged module."""

    # Optional feature schema; when set, loaded tables are cast to it.
    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    """ArrowBasedBuilder that reads pickled pandas DataFrames into Arrow tables."""

    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Handle string, list and dict values in ``data_files``."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield (index, Arrow table) pairs, one per input pickle file."""
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
245
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module

import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask


logger = logging.getLogger(__name__)


class NERTransformer(BaseTransformer):
    """A training module for NER (token classification).

    See BaseTransformer for the core options; this class wires in the
    TokenClassificationTask, the label list and the NER data pipeline.
    """

    mode = "token-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        # ignore_index marks label positions that must not contribute to the loss
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        """Run one training step and return the loss."""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids

        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}

    def prepare_data(self):
        """Build (or load cached) features for the train/dev/test splits."""
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
                    sep_token=self.tokenizer.sep_token,
                    sep_token_extra=False,
                    pad_on_left=bool(self.config.model_type in ["xlnet"]),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: int, batch_size: int, shuffle: bool = False) -> DataLoader:
        """Load the cached features for ``mode`` and wrap them in a DataLoader."""
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(input_ids, attention_mask, token_type_ids, label_ids), batch_size=batch_size
        )

    def validation_step(self, batch, batch_nb):
        """Compute validation loss and collect predictions/targets."""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        """Aggregate step outputs into seqeval metrics (shared by val and test)."""
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                # Skip positions that carry the ignore-index padding label.
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs):
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        """Add NER-specific CLI arguments on top of BaseTransformer's."""
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)

    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
245
1
"""Sequence feature extraction class for common feature extractors to preprocess sequences."""
from typing import Dict, List, Optional, Union

import numpy as np

from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy


logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    """Base class for speech/sequence feature extractors.

    Adds generic padding and truncation of batched 1-D (or feature_size-D)
    inputs on top of :class:`FeatureExtractionMixin`.
    """

    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features,
        padding=True,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
    ):
        """Pad (and optionally truncate) a batch of extracted features.

        Accepts a BatchFeature, a dict of lists, or a list of dicts/BatchFeatures
        (collate_fn use case) and returns a BatchFeature in the requested
        tensor framework.
        """
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )
            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                # Keep memory in check: downcast float64 results to float32.
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features,
        max_length=None,
        padding_strategy=PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of=None,
        return_attention_mask=None,
    ):
        """Pad a single (unbatched) feature dict up to ``max_length``."""
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features,
        max_length=None,
        pad_to_multiple_of=None,
        truncation=None,
    ):
        """Truncate a single (unbatched) feature dict down to ``max_length``."""
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        """Resolve the user-facing ``padding`` argument into a PaddingStrategy."""
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
356
"""Maximum flow via the Ford-Fulkerson method (Edmonds-Karp: BFS augmenting paths)."""


def bfs(graph, source, sink, parent):
    """Search for an augmenting path from ``source`` to ``sink``.

    Fills ``parent`` so the path can be walked back, and returns True iff
    ``sink`` is reachable through edges with remaining capacity > 0.
    """
    visited = [False] * len(graph)
    queue = [source]
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]


def ford_fulkerson(graph, source, sink):
    """Return the maximum flow from ``source`` to ``sink``.

    ``graph`` is a square capacity matrix; it is mutated in place to hold the
    residual capacities after the computation.
    """
    parent = [-1] * len(graph)  # filled by BFS to reconstruct each path
    max_flow = 0
    while bfs(graph, source, sink, parent):
        # Find the bottleneck (minimum residual capacity) along the path.
        path_flow = float("Inf")
        s = sink
        while s != source:
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Update residual capacities of the edges and reverse edges.
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


if __name__ == "__main__":
    graph = [
        [0, 16, 13, 0, 0, 0],
        [0, 0, 10, 12, 0, 0],
        [0, 4, 0, 0, 14, 0],
        [0, 0, 9, 0, 0, 20],
        [0, 0, 0, 7, 0, 4],
        [0, 0, 0, 0, 0, 0],
    ]
    source, sink = 0, 5
    print(ford_fulkerson(graph, source, sink))
23
0
# NOTE(review): identifiers in this test module have been machine-mangled:
# every helper/test method is named `_lowerCamelCase` (later defs shadow the
# earlier ones), local assignment targets were collapsed to `__UpperCamelCase`
# while later statements still read the intended names (batch_size, image,
# model, alt_pipe, prompt, ...), and `__lowerCAmelCase` stands in for several
# different original arguments (presumably torch_device and True/None flags
# — TODO confirm against the upstream diffusers test file). Code is kept
# as-is and documented; it cannot run in this state.
import gc
import random
import unittest

import numpy as np
import torch
from transformers import XLMRobertaTokenizer

from diffusers import (
    AltDiffusionImgaImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
    RobertaSeriesConfig,
    RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

enable_full_determinism()


class lowerCamelCase__ ( unittest.TestCase):
    '''simple docstring'''

    # tearDown: release GPU memory between tests.
    def _lowerCamelCase ( self :List[str] ) -> Optional[Any]:
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    # Property building a 1x3x32x32 random input image (seeded).
    @property
    def _lowerCamelCase ( self :Union[str, Any] ) -> List[Any]:
        __UpperCamelCase : Any = 1
        __UpperCamelCase : Union[str, Any] = 3
        __UpperCamelCase : List[str] = (3_2, 3_2)
        __UpperCamelCase : Tuple = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__lowerCAmelCase )
        return image

    # Property building a tiny conditional UNet for fast tests.
    @property
    def _lowerCamelCase ( self :Optional[int] ) -> Any:
        torch.manual_seed(0 )
        __UpperCamelCase : List[Any] = UNetaDConditionModel(
            block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
        return model

    # Property building a tiny VAE.
    @property
    def _lowerCamelCase ( self :Optional[Any] ) -> Any:
        torch.manual_seed(0 )
        __UpperCamelCase : Optional[int] = AutoencoderKL(
            block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        return model

    # Property building a tiny Roberta-series text encoder.
    @property
    def _lowerCamelCase ( self :Optional[int] ) -> Tuple:
        torch.manual_seed(0 )
        __UpperCamelCase : Any = RobertaSeriesConfig(
            hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_6 , )
        return RobertaSeriesModelWithTransformation(__lowerCAmelCase )

    # Property returning a dummy feature-extractor factory.
    # NOTE(review): `def extract(*a, **a)` repeats parameter name `a`, which
    # is a SyntaxError — the original presumably used `*args, **kwargs`.
    @property
    def _lowerCamelCase ( self :str ) -> int:
        def extract(*a :Any , **a :Optional[int] ):
            class lowerCamelCase__ :
                '''simple docstring'''

                def __init__( self :str ) -> List[str]:
                    __UpperCamelCase : Dict = torch.ones([0] )

                def _lowerCamelCase ( self :Any , a :Optional[int] ) -> Any:
                    self.pixel_values.to(__lowerCAmelCase )
                    return self

            return Out()

        return extract

    # Default img2img run on CPU: 2 inference steps, exact 3x3 output slice
    # compared (dict output vs tuple output must agree).
    def _lowerCamelCase ( self :Optional[Any] ) -> Optional[int]:
        __UpperCamelCase : List[Any] = "cpu"  # ensure determinism for the device-dependent torch.Generator
        __UpperCamelCase : Dict = self.dummy_cond_unet
        __UpperCamelCase : Union[str, Any] = PNDMScheduler(skip_prk_steps=__lowerCAmelCase )
        __UpperCamelCase : Optional[int] = self.dummy_vae
        __UpperCamelCase : Optional[Any] = self.dummy_text_encoder
        __UpperCamelCase : List[str] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
        __UpperCamelCase : Optional[Any] = 7_7
        __UpperCamelCase : Optional[int] = self.dummy_image.to(__lowerCAmelCase )
        __UpperCamelCase : Tuple = init_image / 2 + 0.5
        # make sure here that pndm scheduler skips prk
        __UpperCamelCase : int = AltDiffusionImgaImgPipeline(
            unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , vae=__lowerCAmelCase , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase , safety_checker=__lowerCAmelCase , feature_extractor=self.dummy_extractor , )
        __UpperCamelCase : Any = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__lowerCAmelCase )
        __UpperCamelCase : List[Any] = alt_pipe.to(__lowerCAmelCase )
        alt_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        __UpperCamelCase : str = "A painting of a squirrel eating a burger"
        __UpperCamelCase : Dict = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
        __UpperCamelCase : Union[str, Any] = alt_pipe(
            [prompt] , generator=__lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=__lowerCAmelCase , )
        __UpperCamelCase : str = output.images
        __UpperCamelCase : Optional[Any] = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
        __UpperCamelCase : str = alt_pipe(
            [prompt] , generator=__lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=__lowerCAmelCase , return_dict=__lowerCAmelCase , )[0]
        __UpperCamelCase : Union[str, Any] = image[0, -3:, -3:, -1]
        __UpperCamelCase : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 3_2, 3_2, 3)
        __UpperCamelCase : Union[str, Any] = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3

    # fp16 smoke test on GPU: only validates the output shape.
    @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def _lowerCamelCase ( self :Optional[int] ) -> str:
        __UpperCamelCase : List[str] = self.dummy_cond_unet
        __UpperCamelCase : Dict = PNDMScheduler(skip_prk_steps=__lowerCAmelCase )
        __UpperCamelCase : Any = self.dummy_vae
        __UpperCamelCase : Any = self.dummy_text_encoder
        __UpperCamelCase : Any = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
        __UpperCamelCase : Dict = 7_7
        __UpperCamelCase : Tuple = self.dummy_image.to(__lowerCAmelCase )
        # put models in fp16
        __UpperCamelCase : Any = unet.half()
        __UpperCamelCase : str = vae.half()
        __UpperCamelCase : int = bert.half()
        # make sure here that pndm scheduler skips prk
        __UpperCamelCase : Optional[int] = AltDiffusionImgaImgPipeline(
            unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , vae=__lowerCAmelCase , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase , safety_checker=__lowerCAmelCase , feature_extractor=self.dummy_extractor , )
        __UpperCamelCase : List[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__lowerCAmelCase )
        __UpperCamelCase : int = alt_pipe.to(__lowerCAmelCase )
        alt_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        __UpperCamelCase : Any = "A painting of a squirrel eating a burger"
        __UpperCamelCase : Tuple = torch.manual_seed(0 )
        __UpperCamelCase : Tuple = alt_pipe(
            [prompt] , generator=__lowerCAmelCase , num_inference_steps=2 , output_type="np" , image=__lowerCAmelCase , ).images
        assert image.shape == (1, 3_2, 3_2, 3)

    # End-to-end run with an input resolution divisible by 8 but not 32,
    # against the released BAAI/AltDiffusion checkpoint.
    @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def _lowerCamelCase ( self :List[str] ) -> List[Any]:
        __UpperCamelCase : Optional[Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        # resize to resolution that is divisible by 8 but not 16 or 32
        __UpperCamelCase : Optional[Any] = init_image.resize((7_6_0, 5_0_4) )
        __UpperCamelCase : str = "BAAI/AltDiffusion"
        __UpperCamelCase : Optional[int] = AltDiffusionImgaImgPipeline.from_pretrained(
            __lowerCAmelCase , safety_checker=__lowerCAmelCase , )
        pipe.to(__lowerCAmelCase )
        pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        pipe.enable_attention_slicing()
        __UpperCamelCase : Dict = "A fantasy landscape, trending on artstation"
        __UpperCamelCase : Optional[int] = torch.manual_seed(0 )
        __UpperCamelCase : Optional[int] = pipe(
            prompt=__lowerCAmelCase , image=__lowerCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=__lowerCAmelCase , output_type="np" , )
        __UpperCamelCase : Tuple = output.images[0]
        __UpperCamelCase : List[Any] = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
        assert image.shape == (5_0_4, 7_6_0, 3)
        __UpperCamelCase : Optional[int] = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2


@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase):
    '''simple docstring'''

    # tearDown: release GPU memory between tests.
    def _lowerCamelCase ( self :str ) -> int:
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    # Slow integration test: full pipeline on released weights, compared
    # against a stored reference numpy image.
    def _lowerCamelCase ( self :Tuple ) -> Dict:
        __UpperCamelCase : str = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        __UpperCamelCase : List[str] = init_image.resize((7_6_8, 5_1_2) )
        __UpperCamelCase : Tuple = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
        __UpperCamelCase : Tuple = "BAAI/AltDiffusion"
        __UpperCamelCase : int = AltDiffusionImgaImgPipeline.from_pretrained(
            __lowerCAmelCase , safety_checker=__lowerCAmelCase , )
        pipe.to(__lowerCAmelCase )
        pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        pipe.enable_attention_slicing()
        __UpperCamelCase : Optional[int] = "A fantasy landscape, trending on artstation"
        __UpperCamelCase : List[str] = torch.manual_seed(0 )
        __UpperCamelCase : List[Any] = pipe(
            prompt=__lowerCAmelCase , image=__lowerCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=__lowerCAmelCase , output_type="np" , )
        __UpperCamelCase : Dict = output.images[0]
        assert image.shape == (5_1_2, 7_6_8, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image ).max() < 1E-2
232
def lowerCAmelCase__(word: str) -> str:
    """Convert a string to upper case, shifting only ASCII 'a'-'z'.

    Every non-lowercase-ASCII character is returned unchanged.

    Bug fix: the original called ``ord()`` on the whole input (a TypeError
    for any multi-character string) and iterated over an undefined name.

    >>> lowerCAmelCase__("hello world")
    'HELLO WORLD'
    >>> lowerCAmelCase__("Already UP!")
    'ALREADY UP!'
    """
    # ASCII lowercase and uppercase letters differ by exactly 32.
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
209
0
"""Project Euler problem 35: count the circular primes below one million."""
from __future__ import annotations

# Sieve of Eratosthenes over [0, 1_000_000]; seive[n] is True iff n is prime.
# Fixes the mangled original, whose sieve-building statements assigned to the
# wrong names and whose three functions were all defined as `a` while the
# call sites referenced is_prime / contains_an_even_digit /
# find_circular_primes (NameError).
seive = [True] * 1000001
seive[0] = seive[1] = False  # 0 and 1 are not prime
i = 2
while i * i <= 1000000:
    if seive[i]:
        # Slice assignment marks all multiples in one C-level pass.
        seive[i * i :: i] = [False] * len(range(i * i, 1000001, i))
    i += 1


def is_prime(n: int) -> bool:
    """Return True if ``n`` (0 <= n <= 1_000_000) is prime."""
    return seive[n]


def contains_an_even_digit(n: int) -> bool:
    """Return True if the decimal representation of ``n`` has an even digit."""
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1000000) -> list[int]:
    """Return the circular primes up to ``limit`` (must be <= 1_000_000).

    A circular prime stays prime under every rotation of its digits. Apart
    from 2 itself, any candidate containing an even digit has a rotation
    ending in that digit and hence divisible by 2, so such candidates are
    filtered out before checking rotations.
    """
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(rotated) for rotated in list_nums):
                result.append(num)
    return result


def solution() -> int:
    """Return the number of circular primes below one million."""
    return len(find_circular_primes())


if __name__ == "__main__":
    print(f"{len(find_circular_primes()) = }")
219
'''simple docstring'''
import torch

from diffusers import DDIMParallelScheduler

from .test_schedulers import SchedulerCommonTest


# NOTE(review): identifiers in this module are machine-mangled: the base
# class `A__` is undefined (presumably SchedulerCommonTest, which is imported
# but otherwise unused — TODO confirm), both class attributes are assigned to
# the same name `_a` (likely scheduler_classes / forward_default_kwargs),
# every method is named `lowerCAmelCase__` so later definitions shadow the
# earlier ones, and local assignment targets were collapsed to
# `UpperCamelCase__` while later statements read the intended names
# (config, scheduler, sample, ...). Documented as-is; it cannot run.
class lowercase ( A__ ):
    """simple docstring"""
    _a = (DDIMParallelScheduler,)
    _a = (('eta', 0.0), ('num_inference_steps', 50))

    # Build a default scheduler config, overridable through kwargs.
    def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
        '''simple docstring'''
        UpperCamelCase__ :List[Any] = {
            '''num_train_timesteps''': 1000,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''clip_sample''': True,
        }
        config.update(**UpperCamelCase_ )
        return config

    # Run a full denoising loop with the dummy model and return the sample.
    def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
        '''simple docstring'''
        UpperCamelCase__ :str = self.scheduler_classes[0]
        UpperCamelCase__ :Optional[Any] = self.get_scheduler_config(**UpperCamelCase_ )
        UpperCamelCase__ :Optional[Any] = scheduler_class(**UpperCamelCase_ )
        UpperCamelCase__ , UpperCamelCase__ :Optional[int] = 10, 0.0
        UpperCamelCase__ :List[str] = self.dummy_model()
        UpperCamelCase__ :List[str] = self.dummy_sample_deter
        scheduler.set_timesteps(UpperCamelCase_ )
        for t in scheduler.timesteps:
            UpperCamelCase__ :Optional[int] = model(UpperCamelCase_ , UpperCamelCase_ )
            UpperCamelCase__ :List[Any] = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample
        return sample

    # Config sweep: num_train_timesteps.
    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=UpperCamelCase_ )

    # Config sweep: steps_offset, plus an exact timestep-schedule check.
    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=UpperCamelCase_ )
        UpperCamelCase__ :List[str] = self.scheduler_classes[0]
        UpperCamelCase__ :List[str] = self.get_scheduler_config(steps_offset=1 )
        UpperCamelCase__ :str = scheduler_class(**UpperCamelCase_ )
        scheduler.set_timesteps(5 )
        assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )

    # Config sweep: (beta_start, beta_end) pairs.
    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=UpperCamelCase_ , beta_end=UpperCamelCase_ )

    # Config sweep: beta_schedule.
    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=UpperCamelCase_ )

    # Config sweep: prediction_type.
    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=UpperCamelCase_ )

    # Config sweep: clip_sample.
    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=UpperCamelCase_ )

    # Config sweep: timestep_spacing.
    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=UpperCamelCase_ )

    # Config sweep: rescale_betas_zero_snr.
    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=UpperCamelCase_ )

    # Config sweep: thresholding with sample_max_value / prediction_type.
    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        self.check_over_configs(thresholding=UpperCamelCase_ )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=UpperCamelCase_ , prediction_type=UpperCamelCase_ , sample_max_value=UpperCamelCase_ , )

    # Forward sweep: time_step values.
    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=UpperCamelCase_ )

    # Forward sweep: (time_step, num_inference_steps) pairs.
    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
            self.check_over_forward(time_step=UpperCamelCase_ , num_inference_steps=UpperCamelCase_ )

    # Forward sweep: (time_step, eta) pairs.
    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
            self.check_over_forward(time_step=UpperCamelCase_ , eta=UpperCamelCase_ )

    # Exact-value checks of the internal variance schedule.
    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        UpperCamelCase__ :Union[str, Any] = self.scheduler_classes[0]
        UpperCamelCase__ :List[str] = self.get_scheduler_config()
        UpperCamelCase__ :int = scheduler_class(**UpperCamelCase_ )
        assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.14771 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.32460 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.00979 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1e-5

    # Batched stepping without noise: checks sum/mean of the result.
    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        UpperCamelCase__ :Tuple = self.scheduler_classes[0]
        UpperCamelCase__ :Dict = self.get_scheduler_config()
        UpperCamelCase__ :Any = scheduler_class(**UpperCamelCase_ )
        UpperCamelCase__ , UpperCamelCase__ :List[str] = 10, 0.0
        scheduler.set_timesteps(UpperCamelCase_ )
        UpperCamelCase__ :Optional[int] = self.dummy_model()
        UpperCamelCase__ :List[Any] = self.dummy_sample_deter
        UpperCamelCase__ :int = self.dummy_sample_deter + 0.1
        UpperCamelCase__ :List[str] = self.dummy_sample_deter - 0.1
        UpperCamelCase__ :str = samplea.shape[0]
        UpperCamelCase__ :Optional[Any] = torch.stack([samplea, samplea, samplea] , dim=0 )
        UpperCamelCase__ :List[Any] = torch.arange(UpperCamelCase_ )[0:3, None].repeat(1 , UpperCamelCase_ )
        UpperCamelCase__ :Dict = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        UpperCamelCase__ :Any = scheduler.batch_step_no_noise(UpperCamelCase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , UpperCamelCase_ )
        UpperCamelCase__ :Dict = torch.sum(torch.abs(UpperCamelCase_ ) )
        UpperCamelCase__ :Any = torch.mean(torch.abs(UpperCamelCase_ ) )
        assert abs(result_sum.item() - 1147.7904 ) < 1e-2
        assert abs(result_mean.item() - 0.4982 ) < 1e-3

    # Full loop with default config: exact sum/mean of the final sample.
    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        UpperCamelCase__ :int = self.full_loop()
        UpperCamelCase__ :List[str] = torch.sum(torch.abs(UpperCamelCase_ ) )
        UpperCamelCase__ :Any = torch.mean(torch.abs(UpperCamelCase_ ) )
        assert abs(result_sum.item() - 172.0067 ) < 1e-2
        assert abs(result_mean.item() - 0.223967 ) < 1e-3

    # Full loop with v-prediction.
    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        UpperCamelCase__ :Any = self.full_loop(prediction_type='''v_prediction''' )
        UpperCamelCase__ :Dict = torch.sum(torch.abs(UpperCamelCase_ ) )
        UpperCamelCase__ :str = torch.mean(torch.abs(UpperCamelCase_ ) )
        assert abs(result_sum.item() - 52.5302 ) < 1e-2
        assert abs(result_mean.item() - 0.0684 ) < 1e-3

    # Full loop with set_alpha_to_one / custom beta_start (variant 1).
    # NOTE(review): this and the next method differ only in expected values;
    # upstream they likely also set different timestep_spacing — TODO confirm.
    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        UpperCamelCase__ :Dict = self.full_loop(set_alpha_to_one=UpperCamelCase_ , beta_start=0.01 )
        UpperCamelCase__ :str = torch.sum(torch.abs(UpperCamelCase_ ) )
        UpperCamelCase__ :int = torch.mean(torch.abs(UpperCamelCase_ ) )
        assert abs(result_sum.item() - 149.8295 ) < 1e-2
        assert abs(result_mean.item() - 0.1951 ) < 1e-3

    # Full loop with set_alpha_to_one / custom beta_start (variant 2).
    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        UpperCamelCase__ :List[str] = self.full_loop(set_alpha_to_one=UpperCamelCase_ , beta_start=0.01 )
        UpperCamelCase__ :Dict = torch.sum(torch.abs(UpperCamelCase_ ) )
        UpperCamelCase__ :List[str] = torch.mean(torch.abs(UpperCamelCase_ ) )
        assert abs(result_sum.item() - 149.0784 ) < 1e-2
        assert abs(result_mean.item() - 0.1941 ) < 1e-3
219
1
from typing import List

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Map of released checkpoints to their config files.
EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}


class lowercase__(PretrainedConfig):
    r"""
    Configuration class for an EfficientFormer model. Instantiating a
    configuration with the defaults yields a configuration similar to
    ``snap-research/efficientformer-l1-300``.

    Fixes the mangled original, in which every ``__init__`` parameter was
    named ``UpperCAmelCase_`` (duplicate parameter names are a SyntaxError),
    every attribute assignment was collapsed to ``SCREAMING_SNAKE_CASE__``,
    and the base class ``_UpperCAmelCase`` was undefined even though
    ``PretrainedConfig`` was imported. Parameter names are recovered from the
    attribute reads in the body; defaults are unchanged.
    """

    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_metaad_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_metaad_blocks = num_metaad_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
176
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ ) -> str: '''simple docstring''' if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): raise ValueError('iterations must be defined as integers' ) if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or not number >= 1: raise ValueError( 'starting number must be\n and integer and be more than 0' ) if not iterations >= 1: raise ValueError('Iterations must be done more than 0 times to play FizzBuzz' ) SCREAMING_SNAKE_CASE__ = '' while number <= iterations: if number % 3 == 0: out += "Fizz" if number % 5 == 0: out += "Buzz" if 0 not in (number % 3, number % 5): out += str(UpperCamelCase_ ) # print(out) number += 1 out += " " return out if __name__ == "__main__": import doctest doctest.testmod()
176
1
def is_balanced(s: str) -> bool:
    """Return True if every bracket in ``s`` is properly matched and nested.

    Recognizes (), [] and {}; non-bracket characters are ignored.

    Fixes the mangled original, where both functions were defined as
    ``UpperCamelCase`` while the call sites referenced ``is_balanced`` and
    ``main`` (NameError).
    """
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            # Closer with no opener, or with a mismatched opener.
            return False

    # Balanced only when every opener was consumed.
    return len(stack) == 0


def main() -> None:
    """Read a bracket sequence from stdin and report whether it is balanced."""
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
34
from __future__ import annotations


def UpperCamelCase(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's law (V = I * R) to compute the one unknown quantity.

    Exactly one of the three arguments must be 0; it marks the quantity to
    solve for. Returns a single-entry dict naming the computed quantity.

    Raises:
        ValueError: if zero or more than one argument is 0, or resistance < 0.

    Fixes the mangled original, whose three parameters all shared the name
    ``lowercase_`` (a SyntaxError) while the body read ``voltage``,
    ``current`` and ``resistance``.
    """
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
34
1
"""simple docstring""" def _A ( lowercase , lowercase = " " ): """simple docstring""" a =[] a =0 for index, char in enumerate(lowercase ): if char == separator: split_words.append(string[last_index:index] ) a =index + 1 elif index + 1 == len(lowercase ): split_words.append(string[last_index : index + 1] ) return split_words if __name__ == "__main__": from doctest import testmod testmod()
81
"""simple docstring""" import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class snake_case_: def __init__( self : Dict , UpperCamelCase_ : str , UpperCamelCase_ : Dict=1_3 , UpperCamelCase_ : Union[str, Any]=3_2 , UpperCamelCase_ : str=2 , UpperCamelCase_ : int=3 , UpperCamelCase_ : Any=1_6 , UpperCamelCase_ : int=[1, 2, 1] , UpperCamelCase_ : Optional[int]=[2, 2, 4] , UpperCamelCase_ : Any=2 , UpperCamelCase_ : Any=2.0 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : int=0.0 , UpperCamelCase_ : Optional[Any]=0.0 , UpperCamelCase_ : Any=0.1 , UpperCamelCase_ : Tuple="gelu" , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : Any=True , UpperCamelCase_ : List[Any]=0.02 , UpperCamelCase_ : Tuple=1E-5 , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : str=True , UpperCamelCase_ : List[Any]=1_0 , UpperCamelCase_ : Dict=8 , ): lowerCAmelCase : Union[str, Any] = parent lowerCAmelCase : int = batch_size lowerCAmelCase : List[str] = image_size lowerCAmelCase : Union[str, Any] = patch_size lowerCAmelCase : int = num_channels lowerCAmelCase : Any = embed_dim lowerCAmelCase : Any = depths lowerCAmelCase : Any = num_heads lowerCAmelCase : int = window_size 
lowerCAmelCase : List[Any] = mlp_ratio lowerCAmelCase : int = qkv_bias lowerCAmelCase : Optional[Any] = hidden_dropout_prob lowerCAmelCase : str = attention_probs_dropout_prob lowerCAmelCase : str = drop_path_rate lowerCAmelCase : Union[str, Any] = hidden_act lowerCAmelCase : int = use_absolute_embeddings lowerCAmelCase : Union[str, Any] = patch_norm lowerCAmelCase : int = layer_norm_eps lowerCAmelCase : str = initializer_range lowerCAmelCase : Optional[int] = is_training lowerCAmelCase : int = scope lowerCAmelCase : List[str] = use_labels lowerCAmelCase : str = type_sequence_label_size lowerCAmelCase : Union[str, Any] = encoder_stride def lowerCamelCase__ ( self : Any ): lowerCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase : Union[str, Any] = None if self.use_labels: lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase : Tuple = self.get_config() return config, pixel_values, labels def lowerCamelCase__ ( self : List[Any] ): return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : str , UpperCamelCase_ : Dict ): lowerCAmelCase : List[str] = SwinvaModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : List[str] = model(UpperCamelCase_ 
) lowerCAmelCase : Tuple = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowerCAmelCase : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int] ): lowerCAmelCase : Tuple = SwinvaForMaskedImageModeling(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : Dict = model(UpperCamelCase_ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCAmelCase : List[Any] = 1 lowerCAmelCase : List[str] = SwinvaForMaskedImageModeling(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCAmelCase : int = model(UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : int ): lowerCAmelCase : List[str] = self.type_sequence_label_size lowerCAmelCase : Optional[Any] = SwinvaForImageClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : Optional[int] = model(UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCamelCase__ ( self : str ): lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs() lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : str = config_and_inputs lowerCAmelCase : Dict = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class snake_case_( a__ , a__ , unittest.TestCase ): __UpperCamelCase = ( (SwinvaModel, 
SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) __UpperCamelCase = ( {'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification} if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def lowerCamelCase__ ( self : int ): lowerCAmelCase : Dict = SwinvaModelTester(self ) lowerCAmelCase : List[str] = ConfigTester(self , config_class=UpperCamelCase_ , embed_dim=3_7 ) def lowerCamelCase__ ( self : Optional[int] ): self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_ ) @unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' ) def lowerCamelCase__ ( self : Dict ): pass @unittest.skip(reason='''Swinv2 does not use inputs_embeds''' ) def lowerCamelCase__ ( self : int ): pass def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase, lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase : Dict = model_class(UpperCamelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCAmelCase : str = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase_ , nn.Linear ) ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase, lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase : Tuple = 
model_class(UpperCamelCase_ ) lowerCAmelCase : Tuple = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase : Optional[int] = [*signature.parameters.keys()] lowerCAmelCase : int = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase, lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : Optional[Any] = True for model_class in self.all_model_classes: lowerCAmelCase : Any = True lowerCAmelCase : List[str] = False lowerCAmelCase : int = True lowerCAmelCase : int = model_class(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() with torch.no_grad(): lowerCAmelCase : Optional[Any] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase : str = outputs.attentions lowerCAmelCase : int = len(self.model_tester.depths ) self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowerCAmelCase : Any = True lowerCAmelCase : Union[str, Any] = config.window_size**2 lowerCAmelCase : int = model_class(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() with torch.no_grad(): lowerCAmelCase : Optional[int] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase : Dict = outputs.attentions self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) lowerCAmelCase : str = len(UpperCamelCase_ ) # Check attention is always last and order is fine lowerCAmelCase : Optional[int] = True lowerCAmelCase : int = True lowerCAmelCase : Optional[Any] = model_class(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() with torch.no_grad(): lowerCAmelCase : Tuple = 
model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) ) if hasattr(self.model_tester , '''num_hidden_states_types''' ): lowerCAmelCase : List[Any] = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states lowerCAmelCase : Union[str, Any] = 2 self.assertEqual(out_len + added_hidden_states , len(UpperCamelCase_ ) ) lowerCAmelCase : List[str] = outputs.attentions self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) def lowerCamelCase__ ( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[Any] ): lowerCAmelCase : int = model_class(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() with torch.no_grad(): lowerCAmelCase : Union[str, Any] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase : str = outputs.hidden_states lowerCAmelCase : List[str] = getattr( self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ ) # Swinv2 has a different seq_length lowerCAmelCase : Any = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCAmelCase : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) lowerCAmelCase : List[str] = outputs.reshaped_hidden_states self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ ) lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : str = reshaped_hidden_states[0].shape lowerCAmelCase : Optional[Any] = ( reshaped_hidden_states[0].view(UpperCamelCase_ , UpperCamelCase_ , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( 
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : Any = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: lowerCAmelCase : Union[str, Any] = True self.check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase : Tuple = True self.check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : Dict = 3 lowerCAmelCase : Dict = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowerCAmelCase : Dict = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCAmelCase : List[str] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowerCAmelCase : Tuple = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: lowerCAmelCase : str = True self.check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase : Optional[int] = True self.check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , (padded_height, 
padded_width) ) def lowerCamelCase__ ( self : int ): lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase_ ) def lowerCamelCase__ ( self : str ): lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ ) @slow def lowerCamelCase__ ( self : int ): for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase : int = SwinvaModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase, lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : Union[str, Any] = _config_zero_init(UpperCamelCase_ ) for model_class in self.all_model_classes: lowerCAmelCase : Union[str, Any] = model_class(config=UpperCamelCase_ ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @require_vision @require_torch class snake_case_( unittest.TestCase ): @cached_property def lowerCamelCase__ ( self : Dict ): return ( AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ) if is_vision_available() else None ) @slow def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : str = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to( UpperCamelCase_ ) lowerCAmelCase : List[Any] = self.default_image_processor lowerCAmelCase : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCAmelCase : Union[str, Any] = image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ ) # forward pass with torch.no_grad(): lowerCAmelCase : Dict = 
model(**UpperCamelCase_ ) # verify the logits lowerCAmelCase : List[Any] = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , UpperCamelCase_ ) lowerCAmelCase : Any = torch.tensor([-0.3_947, -0.4_306, 0.0_026] ).to(UpperCamelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1E-4 ) )
60
0
# NOTE(review): the identifiers in this module appear machine-mangled.
# Concretely:
#   * every helper/test method is named ``UpperCamelCase`` — later defs shadow
#     earlier ones, so unittest would only ever see the last one;
#   * several ``def``s repeat the parameter name ``lowercase_``, which is a
#     SyntaxError in Python (duplicate argument), so the module cannot import;
#   * ``__snake_case`` (used as a base class) and ``MobileViTModelTester`` /
#     ``MobileViTConfigTester`` are referenced but never defined under those
#     names here;
#   * locals are uniformly rebound to ``_snake_case`` while later statements
#     still use the original names (``model``, ``result``, ``outputs``, ...).
# The code is left byte-identical; comments describe the apparent intent (a
# transformers-style MobileViT test suite) — confirm against the upstream
# tests/models/mobilevit test file before relying on any specific claim.
import inspect
import unittest

from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
    from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class lowercase_ ( __snake_case ):
    # Config tester: builds a config from the tester's inputs_dict and checks
    # that the MobileViT-specific attributes exist.
    # NOTE(review): the hasattr() calls below are made against the *class name*
    # (``lowercase_``), not the config instance built on the previous line —
    # presumably the original checked hasattr(config, ...).
    def UpperCamelCase ( self ):
        _snake_case : int = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(lowercase_ , "hidden_sizes" ) )
        self.parent.assertTrue(hasattr(lowercase_ , "neck_hidden_sizes" ) )
        self.parent.assertTrue(hasattr(lowercase_ , "num_attention_heads" ) )


class lowercase_ :
    # Model tester: holds the small-model hyperparameters and builds
    # config + random pixel inputs for the common model tests.
    def __init__( self , lowercase_ , lowercase_=13 , lowercase_=32 , lowercase_=2 , lowercase_=3 , lowercase_=640 , lowercase_=4 , lowercase_="silu" , lowercase_=3 , lowercase_=32 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=0.02 , lowercase_=True , lowercase_=True , lowercase_=10 , lowercase_=None , ):
        # NOTE(review): all parameters share the name ``lowercase_`` (invalid),
        # while the assignments below read the original parameter names.
        _snake_case : str = parent
        _snake_case : Union[str, Any] = batch_size
        _snake_case : List[str] = image_size
        _snake_case : Union[str, Any] = patch_size
        _snake_case : Dict = num_channels
        _snake_case : Union[str, Any] = last_hidden_size
        _snake_case : Dict = num_attention_heads
        _snake_case : str = hidden_act
        _snake_case : Union[str, Any] = conv_kernel_size
        _snake_case : int = output_stride
        _snake_case : Any = hidden_dropout_prob
        _snake_case : List[str] = attention_probs_dropout_prob
        _snake_case : Tuple = classifier_dropout_prob
        _snake_case : Dict = use_labels
        _snake_case : Union[str, Any] = is_training
        _snake_case : Dict = num_labels
        _snake_case : str = initializer_range
        _snake_case : Union[str, Any] = scope

    # Builds (config, pixel_values, labels, pixel_labels): random image tensor
    # plus optional classification labels and per-pixel segmentation labels.
    def UpperCamelCase ( self ):
        _snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        _snake_case : Dict = None
        _snake_case : Any = None
        if self.use_labels:
            _snake_case : List[str] = ids_tensor([self.batch_size] , self.num_labels )
            _snake_case : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        _snake_case : List[Any] = self.get_config()
        return config, pixel_values, labels, pixel_labels

    # Returns a MobileViTConfig built from the tester's hyperparameters.
    def UpperCamelCase ( self ):
        return MobileViTConfig(
            image_size=self.image_size ,
            patch_size=self.patch_size ,
            num_channels=self.num_channels ,
            num_attention_heads=self.num_attention_heads ,
            hidden_act=self.hidden_act ,
            conv_kernel_size=self.conv_kernel_size ,
            output_stride=self.output_stride ,
            hidden_dropout_prob=self.hidden_dropout_prob ,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob ,
            classifier_dropout_prob=self.classifier_dropout_prob ,
            initializer_range=self.initializer_range ,
        )

    # Checks the base model's last_hidden_state shape:
    # (batch, last_hidden_size, image_size/output_stride, image_size/output_stride).
    def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
        _snake_case : List[str] = MobileViTModel(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        _snake_case : int = model(lowercase_ )
        self.parent.assertEqual(
            result.last_hidden_state.shape ,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) ,
        )

    # Checks the image-classification head: logits of shape (batch, num_labels).
    def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
        _snake_case : List[str] = self.num_labels
        _snake_case : str = MobileViTForImageClassification(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        _snake_case : str = model(lowercase_ , labels=lowercase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    # Checks the semantic-segmentation head, with and without labels:
    # logits of shape (batch, num_labels, H/output_stride, W/output_stride).
    def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
        _snake_case : Dict = self.num_labels
        _snake_case : Dict = MobileViTForSemanticSegmentation(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        _snake_case : Tuple = model(lowercase_ )
        self.parent.assertEqual(
            result.logits.shape ,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) ,
        )
        _snake_case : Optional[Any] = model(lowercase_ , labels=lowercase_ )
        self.parent.assertEqual(
            result.logits.shape ,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) ,
        )

    # Repackages prepare_config_and_inputs() into (config, {"pixel_values": ...}).
    def UpperCamelCase ( self ):
        _snake_case : Union[str, Any] = self.prepare_config_and_inputs()
        _snake_case : List[str] = config_and_inputs
        _snake_case : Optional[Any] = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class lowercase_ ( __snake_case , __snake_case , unittest.TestCase ):
    # Common model-test suite (mixin bases mangled to ``__snake_case``).
    # NOTE(review): the four repeated ``_lowerCamelCase`` assignments shadow
    # each other; they presumably were all_model_classes / pipeline_model_mapping
    # plus the usual test_* boolean flags in the original.
    _lowerCamelCase = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    _lowerCamelCase = (
        {
            'feature-extraction': MobileViTModel,
            'image-classification': MobileViTForImageClassification,
            'image-segmentation': MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    _lowerCamelCase = False
    _lowerCamelCase = False
    _lowerCamelCase = False
    _lowerCamelCase = False

    # setUp: instantiate the model tester and the config tester.
    # NOTE(review): MobileViTModelTester / MobileViTConfigTester are never
    # defined under those names in this file — NameError at runtime.
    def UpperCamelCase ( self ):
        _snake_case : Dict = MobileViTModelTester(self )
        _snake_case : List[Any] = MobileViTConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ )

    def UpperCamelCase ( self ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds" )
    def UpperCamelCase ( self ):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings" )
    def UpperCamelCase ( self ):
        pass

    @unittest.skip(reason="MobileViT does not output attentions" )
    def UpperCamelCase ( self ):
        pass

    # Verifies the model forward signature starts with "pixel_values".
    def UpperCamelCase ( self ):
        _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _snake_case : Optional[int] = model_class(lowercase_ )
            _snake_case : Union[str, Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _snake_case : Any = [*signature.parameters.keys()]
            _snake_case : Any = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , lowercase_ )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def UpperCamelCase ( self ):
        pass

    def UpperCamelCase ( self ):
        _snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase_ )

    # Checks hidden_states: 5 feature maps whose spatial size halves each stage.
    def UpperCamelCase ( self ):
        def check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ):
            _snake_case : Any = model_class(lowercase_ )
            model.to(lowercase_ )
            model.eval()
            with torch.no_grad():
                _snake_case : Union[str, Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
            _snake_case : List[str] = outputs.hidden_states
            _snake_case : List[Any] = 5
            self.assertEqual(len(lowercase_ ) , lowercase_ )
            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            _snake_case : Dict = 2
            for i in range(len(lowercase_ ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) ,
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] ,
                )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2 )

        _snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _snake_case : str = True
            check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            _snake_case : List[Any] = True
            check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )

    def UpperCamelCase ( self ):
        _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowercase_ )

    def UpperCamelCase ( self ):
        _snake_case : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*lowercase_ )

    # Smoke-test loading the first published checkpoint.
    @slow
    def UpperCamelCase ( self ):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _snake_case : Dict = MobileViTModel.from_pretrained(lowercase_ )
            self.assertIsNotNone(lowercase_ )


def snake_case () -> Optional[Any]:
    """Load the COCO cats fixture image used by the integration tests."""
    _snake_case : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image


@require_torch
@require_vision
class lowercase_ ( unittest.TestCase ):
    # Integration tests against published apple/mobilevit checkpoints,
    # comparing a few logits against hard-coded expected values.
    @cached_property
    def UpperCamelCase ( self ):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small" ) if is_vision_available() else None

    # Image classification: checks logits shape (1, 1000) and first 3 values.
    @slow
    def UpperCamelCase ( self ):
        _snake_case : Union[str, Any] = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small" ).to(lowercase_ )
        _snake_case : Tuple = self.default_image_processor
        _snake_case : Tuple = prepare_img()
        _snake_case : Optional[int] = image_processor(images=lowercase_ , return_tensors="pt" ).to(lowercase_ )
        # forward pass
        with torch.no_grad():
            _snake_case : Optional[int] = model(**lowercase_ )
        # verify the logits
        _snake_case : Any = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , lowercase_ )
        _snake_case : str = torch.tensor([-1.9_364, -1.2_327, -0.4_653] ).to(lowercase_ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4 ) )

    # Semantic segmentation: checks raw logits (1, 21, 32, 32) and a 3x3x3 slab.
    @slow
    def UpperCamelCase ( self ):
        _snake_case : Union[str, Any] = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
        _snake_case : Tuple = model.to(lowercase_ )
        _snake_case : List[str] = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
        _snake_case : Tuple = prepare_img()
        _snake_case : Optional[Any] = image_processor(images=lowercase_ , return_tensors="pt" ).to(lowercase_ )
        # forward pass
        with torch.no_grad():
            _snake_case : Tuple = model(**lowercase_ )
        _snake_case : Tuple = outputs.logits
        # verify the logits
        _snake_case : Dict = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape , lowercase_ )
        _snake_case : Dict = torch.tensor(
            [
                [[6.9_713, 6.9_786, 7.2_422], [7.2_893, 7.2_825, 7.4_446], [7.6_580, 7.8_797, 7.9_420]],
                [[-10.6_869, -10.3_250, -10.3_471], [-10.4_228, -9.9_868, -9.7_132], [-11.0_405, -11.0_221, -10.7_318]],
                [[-3.3_089, -2.8_539, -2.6_740], [-3.2_706, -2.5_621, -2.5_108], [-3.2_534, -2.6_615, -2.6_651]],
            ] ,
            device=lowercase_ ,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowercase_ , atol=1e-4 ) )

    # Post-processing: segmentation maps resized to target_sizes (50, 60)
    # or left at the model resolution (32, 32).
    @slow
    def UpperCamelCase ( self ):
        _snake_case : Dict = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
        _snake_case : Optional[Any] = model.to(lowercase_ )
        _snake_case : List[str] = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
        _snake_case : int = prepare_img()
        _snake_case : Optional[int] = image_processor(images=lowercase_ , return_tensors="pt" ).to(lowercase_ )
        # forward pass
        with torch.no_grad():
            _snake_case : List[Any] = model(**lowercase_ )
        _snake_case : Union[str, Any] = outputs.logits.detach().cpu()
        _snake_case : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=lowercase_ , target_sizes=[(50, 60)] )
        _snake_case : Any = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape , lowercase_ )
        _snake_case : str = image_processor.post_process_semantic_segmentation(outputs=lowercase_ )
        _snake_case : Dict = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape , lowercase_ )
366
def snake_case(input_str: str) -> bool:
    """
    Return True if *input_str* contains no repeated characters.

    A big-int bitmap is used as a character set: bit ``ord(ch)`` is switched
    on the first time ``ch`` is seen, so a repeat is detected without any
    auxiliary container.

    Fix: the parameter had been renamed to ``__lowercase`` while the body
    still read ``input_str`` (NameError) and called ``ord``/``pow`` on the
    whole string instead of the current character; the names the body's
    logic requires are restored here.

    >>> snake_case("abcde")
    True
    >>> snake_case("abcdea")
    False
    >>> snake_case("")
    True
    """
    bitmap = 0  # one bit per distinct Unicode code point seen so far
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
284
0
"""simple docstring""" import os import zipfile import pytest from datasets.utils.extract import ( BzipaExtractor, Extractor, GzipExtractor, LzaExtractor, SevenZipExtractor, TarExtractor, XzExtractor, ZipExtractor, ZstdExtractor, ) from .utils import require_lza, require_pyazr, require_zstandard @pytest.mark.parametrize( "compression_format, is_archive" , [ ("7z", True), ("bz2", False), ("gzip", False), ("lz4", False), ("tar", True), ("xz", False), ("zip", True), ("zstd", False), ] , ) def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ): A = { "7z": (seven_zip_file, SevenZipExtractor), "bz2": (bza_file, BzipaExtractor), "gzip": (gz_file, GzipExtractor), "lz4": (lza_file, LzaExtractor), "tar": (tar_file, TarExtractor), "xz": (xz_file, XzExtractor), "zip": (zip_file, ZipExtractor), "zstd": (zstd_file, ZstdExtractor), } A, A = input_paths_and_base_extractors[compression_format] if input_path is None: A = F"for '{compression_format}' compression_format, " if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(UpperCamelCase ) assert base_extractor.is_extractable(UpperCamelCase ) A = tmp_path / ("extracted" if is_archive else "extracted.txt") base_extractor.extract(UpperCamelCase , UpperCamelCase ) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name A = file_path.read_text(encoding="utf-8" ) else: A = output_path.read_text(encoding="utf-8" ) A = text_file.read_text(encoding="utf-8" ) assert extracted_file_content == expected_file_content @pytest.mark.parametrize( "compression_format, is_archive" , [ ("7z", True), ("bz2", False), ("gzip", False), ("lz4", False), 
("tar", True), ("xz", False), ("zip", True), ("zstd", False), ] , ) def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ): A = { "7z": seven_zip_file, "bz2": bza_file, "gzip": gz_file, "lz4": lza_file, "tar": tar_file, "xz": xz_file, "zip": zip_file, "zstd": zstd_file, } A = input_paths[compression_format] if input_path is None: A = F"for '{compression_format}' compression_format, " if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(UpperCamelCase ) A = Extractor.infer_extractor_format(UpperCamelCase ) assert extractor_format is not None A = tmp_path / ("extracted" if is_archive else "extracted.txt") Extractor.extract(UpperCamelCase , UpperCamelCase , UpperCamelCase ) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name A = file_path.read_text(encoding="utf-8" ) else: A = output_path.read_text(encoding="utf-8" ) A = text_file.read_text(encoding="utf-8" ) assert extracted_file_content == expected_file_content @pytest.fixture def A__ ( UpperCamelCase , UpperCamelCase ): import tarfile A = tmp_path / "data_dot_dot" directory.mkdir() A = directory / "tar_file_with_dot_dot.tar" with tarfile.TarFile(UpperCamelCase , "w" ) as f: f.add(UpperCamelCase , arcname=os.path.join(".." , text_file.name ) ) return path @pytest.fixture def A__ ( UpperCamelCase ): import tarfile A = tmp_path / "data_sym_link" directory.mkdir() A = directory / "tar_file_with_sym_link.tar" os.symlink(".." 
, directory / "subdir" , target_is_directory=UpperCamelCase ) with tarfile.TarFile(UpperCamelCase , "w" ) as f: f.add(str(directory / "subdir" ) , arcname="subdir" ) # str required by os.readlink on Windows and Python < 3.8 return path @pytest.mark.parametrize( "insecure_tar_file, error_log" , [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")] , ) def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): A = { "tar_file_with_dot_dot": tar_file_with_dot_dot, "tar_file_with_sym_link": tar_file_with_sym_link, } A = insecure_tar_files[insecure_tar_file] A = tmp_path / "extracted" TarExtractor.extract(UpperCamelCase , UpperCamelCase ) assert caplog.text for record in caplog.records: assert record.levelname == "ERROR" assert error_log in record.msg def A__ ( UpperCamelCase ): # We should have less false positives than zipfile.is_zipfile # We do that by checking only the magic number A = tmpdir / "not_a_zip_file" # From: https://github.com/python/cpython/pull/5053 A = ( B"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00" B"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I" B"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07" B"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82" ) with not_a_zip_file.open("wb" ) as f: f.write(UpperCamelCase ) assert zipfile.is_zipfile(str(UpperCamelCase ) ) # is a false positive for `zipfile` assert not ZipExtractor.is_extractable(UpperCamelCase ) # but we're right
292
"""simple docstring""" class _UpperCAmelCase : def __init__( self :List[str] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Tuple ): A = name A = val def __str__( self :str ): return f"{self.__class__.__name__}({self.name}, {self.val})" def __lt__( self :List[Any] , __UpperCamelCase :Union[str, Any] ): return self.val < other.val class _UpperCAmelCase : def __init__( self :List[str] , __UpperCamelCase :Optional[Any] ): A = {} A = {} A = self.build_heap(__UpperCamelCase ) def __getitem__( self :int , __UpperCamelCase :Optional[int] ): return self.get_value(__UpperCamelCase ) def lowerCamelCase ( self :List[Any] , __UpperCamelCase :str ): return (idx - 1) // 2 def lowerCamelCase ( self :int , __UpperCamelCase :Optional[Any] ): return idx * 2 + 1 def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :Optional[int] ): return idx * 2 + 2 def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :str ): return self.heap_dict[key] def lowerCamelCase ( self :int , __UpperCamelCase :Optional[Any] ): A = len(__UpperCamelCase ) - 1 A = self.get_parent_idx(__UpperCamelCase ) for idx, i in enumerate(__UpperCamelCase ): A = idx A = i.val for i in range(__UpperCamelCase , -1 , -1 ): self.sift_down(__UpperCamelCase , __UpperCamelCase ) return array def lowerCamelCase ( self :str , __UpperCamelCase :Optional[Any] , __UpperCamelCase :Dict ): while True: A = self.get_left_child_idx(__UpperCamelCase ) # noqa: E741 A = self.get_right_child_idx(__UpperCamelCase ) A = idx if l < len(__UpperCamelCase ) and array[l] < array[idx]: A = l if r < len(__UpperCamelCase ) and array[r] < array[smallest]: A = r if smallest != idx: A, A = array[smallest], array[idx] ( ( A ), ( A ), ) = ( self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]], ) A = smallest else: break def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :Optional[int] ): A = self.get_parent_idx(__UpperCamelCase ) while p >= 0 and self.heap[p] > self.heap[idx]: A, A = self.heap[idx], 
self.heap[p] A, A = ( self.idx_of_element[self.heap[idx]], self.idx_of_element[self.heap[p]], ) A = p A = self.get_parent_idx(__UpperCamelCase ) def lowerCamelCase ( self :Any ): return self.heap[0] def lowerCamelCase ( self :Tuple ): A, A = self.heap[-1], self.heap[0] A, A = ( self.idx_of_element[self.heap[-1]], self.idx_of_element[self.heap[0]], ) A = self.heap.pop() del self.idx_of_element[x] self.sift_down(0 , self.heap ) return x def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Optional[int] ): self.heap.append(__UpperCamelCase ) A = len(self.heap ) - 1 A = node.val self.sift_up(len(self.heap ) - 1 ) def lowerCamelCase ( self :Tuple ): return len(self.heap ) == 0 def lowerCamelCase ( self :Any , __UpperCamelCase :str , __UpperCamelCase :Dict ): assert ( self.heap[self.idx_of_element[node]].val > new_value ), "newValue must be less that current value" A = new_value A = new_value self.sift_up(self.idx_of_element[node] ) _snake_case : Optional[int] = Node('R', -1) _snake_case : Tuple = Node('B', 6) _snake_case : Tuple = Node('A', 3) _snake_case : Optional[int] = Node('X', 1) _snake_case : List[Any] = Node('E', 4) # Use one of these two ways to generate Min-Heap # Generating Min-Heap from array _snake_case : Tuple = MinHeap([r, b, a, x, e]) # Generating Min-Heap by Insert method # myMinHeap.insert(a) # myMinHeap.insert(b) # myMinHeap.insert(x) # myMinHeap.insert(r) # myMinHeap.insert(e) # Before print('Min Heap - before decrease key') for i in my_min_heap.heap: print(i) print('Min Heap - After decrease key of node [B -> -17]') my_min_heap.decrease_key(b, -17) # After for i in my_min_heap.heap: print(i) if __name__ == "__main__": import doctest doctest.testmod()
292
1
"""simple docstring""" def A__ ( SCREAMING_SNAKE_CASE__) -> bool: return number & 1 == 0 if __name__ == "__main__": import doctest doctest.testmod()
364
import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging __UpperCAmelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name class __snake_case ( __lowerCamelCase ): '''simple docstring''' def __init__( self : List[Any] , A : AutoencoderKL , A : CLIPTextModel , A : CLIPTokenizer , A : UNetaDConditionModel , A : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , A : StableDiffusionSafetyChecker , A : CLIPImageProcessor , ): super().__init__() self.register_modules( vae=A , text_encoder=A , tokenizer=A , unet=A , scheduler=A , safety_checker=A , feature_extractor=A , ) def UpperCAmelCase__ ( self : Optional[Any] , A : Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory __snake_case: Tuple = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(A ) def UpperCAmelCase__ ( self : str ): self.enable_attention_slicing(A ) @torch.no_grad() def __call__( self : List[str] , A : Union[str, List[str]] , A : int = 512 , A : int = 512 , A : int = 50 , A : float = 7.5 , A : Optional[Union[str, List[str]]] = None , A : Optional[int] = 1 , A : float = 0.0 , A : Optional[torch.Generator] = None , A : Optional[torch.FloatTensor] = None , A : Optional[str] = "pil" , A : bool = True , A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , A : int = 1 , A : Optional[torch.FloatTensor] = None , **A : Optional[Any] , ): if isinstance(A , A ): 
__snake_case: int = 1 elif isinstance(A , A ): __snake_case: Optional[Any] = len(A ) else: raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(A )}''' ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0) ): raise ValueError( f'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' f''' {type(A )}.''' ) # get prompt text embeddings __snake_case: Tuple = self.tokenizer( A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , ) __snake_case: Any = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: __snake_case: List[str] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( """The following part of your input was truncated because CLIP can only handle sequences up to""" f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) __snake_case: Dict = text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings is None: __snake_case: Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method __snake_case , __snake_case , __snake_case: List[Any] = text_embeddings.shape __snake_case: Tuple = text_embeddings.repeat(1 , A , 1 ) __snake_case: Dict = text_embeddings.view(bs_embed * num_images_per_prompt , A , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
__snake_case: List[str] = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: __snake_case: List[str] if negative_prompt is None: __snake_case: Any = [""""""] elif type(A ) is not type(A ): raise TypeError( f'''`negative_prompt` should be the same type to `prompt`, but got {type(A )} !=''' f''' {type(A )}.''' ) elif isinstance(A , A ): __snake_case: List[str] = [negative_prompt] elif batch_size != len(A ): raise ValueError( f'''`negative_prompt`: {negative_prompt} has batch size {len(A )}, but `prompt`:''' f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches''' """ the batch size of `prompt`.""" ) else: __snake_case: str = negative_prompt __snake_case: Any = text_input_ids.shape[-1] __snake_case: Dict = self.tokenizer( A , padding="""max_length""" , max_length=A , truncation=A , return_tensors="""pt""" , ) __snake_case: Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method __snake_case: Optional[Any] = uncond_embeddings.shape[1] __snake_case: str = uncond_embeddings.repeat(A , A , 1 ) __snake_case: List[Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , A , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __snake_case: Any = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
__snake_case: Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) __snake_case: List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64) __snake_case: Optional[Any] = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps __snake_case: Any = torch.randn( A , generator=A , device="""cpu""" , dtype=A ).to(self.device ) __snake_case: Tuple = torch.randn(A , generator=A , device="""cpu""" , dtype=A ).to( self.device ) else: __snake_case: Dict = torch.randn( A , generator=A , device=self.device , dtype=A ) __snake_case: Optional[int] = torch.randn(A , generator=A , device=self.device , dtype=A ) else: if latents_reference.shape != latents_shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) __snake_case: Optional[int] = latents_reference.to(self.device ) __snake_case: List[str] = latents.to(self.device ) # This is the key part of the pipeline where we # try to ensure that the generated images w/ the same seed # but different sizes actually result in similar images __snake_case: int = (latents_shape[3] - latents_shape_reference[3]) // 2 __snake_case: Optional[int] = (latents_shape[2] - latents_shape_reference[2]) // 2 __snake_case: int = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx __snake_case: Dict = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy __snake_case: List[Any] = 0 if dx < 0 else dx __snake_case: Dict = 0 if dy < 0 else dy __snake_case: List[str] = max(-dx , 0 ) __snake_case: int = max(-dy , 0 ) # import pdb # pdb.set_trace() __snake_case: List[Any] = latents_reference[:, :, dy : dy + h, dx : dx + w] # set timesteps self.scheduler.set_timesteps(A ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand __snake_case: str = 
self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler __snake_case: Optional[Any] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] __snake_case: Optional[int] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) __snake_case: int = {} if accepts_eta: __snake_case: Optional[Any] = eta for i, t in enumerate(self.progress_bar(A ) ): # expand the latents if we are doing classifier free guidance __snake_case: str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __snake_case: Dict = self.scheduler.scale_model_input(A , A ) # predict the noise residual __snake_case: List[Any] = self.unet(A , A , encoder_hidden_states=A ).sample # perform guidance if do_classifier_free_guidance: __snake_case , __snake_case: Any = noise_pred.chunk(2 ) __snake_case: Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 __snake_case: str = self.scheduler.step(A , A , A , **A ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(A , A , A ) __snake_case: Optional[int] = 1 / 0.1_8215 * latents __snake_case: List[Any] = self.vae.decode(A ).sample __snake_case: str = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 __snake_case: Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if self.safety_checker is not None: __snake_case: List[Any] = self.feature_extractor(self.numpy_to_pil(A ) , return_tensors="""pt""" ).to( self.device ) __snake_case , __snake_case: List[str] = 
self.safety_checker( images=A , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) ) else: __snake_case: Optional[int] = None if output_type == "pil": __snake_case: Tuple = self.numpy_to_pil(A ) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
293
0
import os try: from .build_directory_md import good_file_paths except ImportError: from build_directory_md import good_file_paths # type: ignore __A =list(good_file_paths()) assert filepaths, "good_file_paths() failed!" __A =[file for file in filepaths if file != file.lower()] if upper_files: print(f'''{len(upper_files)} files contain uppercase characters:''') print("\n".join(upper_files) + "\n") __A =[file for file in filepaths if " " in file] if space_files: print(f'''{len(space_files)} files contain space characters:''') print("\n".join(space_files) + "\n") __A =[file for file in filepaths if "-" in file] if hyphen_files: print(f'''{len(hyphen_files)} files contain hyphen characters:''') print("\n".join(hyphen_files) + "\n") __A =[file for file in filepaths if os.sep not in file] if nodir_files: print(f'''{len(nodir_files)} files are not in a directory:''') print("\n".join(nodir_files) + "\n") __A =len(upper_files + space_files + hyphen_files + nodir_files) if bad_files: import sys sys.exit(bad_files)
226
# # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
# #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def a ( *_UpperCAmelCase : List[str] ): '''simple docstring''' with open(_UpperCAmelCase , '''r''' ) as fh: fcntl.flock(_UpperCAmelCase , fcntl.LOCK_EX ) try: print(*_UpperCAmelCase ) finally: fcntl.flock(_UpperCAmelCase , fcntl.LOCK_UN ) __A =int(os.environ["LOCAL_RANK"]) torch.cuda.set_device(local_rank) __A =torch.device("cuda", local_rank) __A =socket.gethostname() __A =f'''[{hostname}-{local_rank}]''' try: # test distributed dist.init_process_group("nccl") dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank __A =dist.get_rank() __A =dist.get_world_size() printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''') dist.barrier() if rank == 0: printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''') except Exception: printflock(f'''{gpu} is broken''') raise
226
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowercase : List[str] = { 'configuration_gpt_bigcode': ['GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTBigCodeConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Tuple = [ 'GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST', 'GPTBigCodeForSequenceClassification', 'GPTBigCodeForTokenClassification', 'GPTBigCodeForCausalLM', 'GPTBigCodeModel', 'GPTBigCodePreTrainedModel', ] if TYPE_CHECKING: from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_bigcode import ( GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, GPTBigCodeModel, GPTBigCodePreTrainedModel, ) else: import sys lowercase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
311
'''simple docstring''' from itertools import zip_longest import requests from bsa import BeautifulSoup from pandas import DataFrame def lowerCAmelCase_ ( snake_case__ = "laptop" ): '''simple docstring''' A : Tuple = F'https://www.amazon.in/laptop/s?k={product}' A : Optional[int] = { '''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''', '''Accept-Language''': '''en-US, en;q=0.5''', } A : Any = BeautifulSoup(requests.get(snake_case__ , headers=snake_case__ ).text ) # Initialize a Pandas dataframe with the column titles A : List[str] = DataFrame( columns=[ '''Product Title''', '''Product Link''', '''Current Price of the product''', '''Product Rating''', '''MRP of the product''', '''Discount''', ] ) # Loop through each entry and store them in the dataframe for item, _ in zip_longest( soup.find_all( '''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''} ) , ): try: A : Optional[Any] = item.ha.text A : Union[str, Any] = '''https://www.amazon.in/''' + item.ha.a['''href'''] A : Tuple = item.find('''span''' , attrs={'''class''': '''a-offscreen'''} ).text try: A : int = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''} ).text except AttributeError: A : Optional[int] = '''Not available''' try: A : str = ( '''₹''' + item.find( '''span''' , attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1] ) except AttributeError: A : List[Any] = '''''' try: A : Dict = float( ( ( float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) ) - float(product_price.strip('''₹''' ).replace(''',''' , '''''' ) ) ) / float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) ) ) * 100 ) except ValueError: A : str = float('''nan''' ) except AttributeError: pass A : Union[str, Any] = [ product_title, product_link, product_price, product_rating, 
product_mrp, discount, ] A : List[str] = ''' ''' A : Optional[Any] = ''' ''' data_frame.index += 1 return data_frame if __name__ == "__main__": lowercase : Union[str, Any] = 'headphones' get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
311
1
'''simple docstring''' import argparse import torch from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert from transformers.utils import logging logging.set_verbosity_info() def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Any: # Initialise PyTorch model UpperCamelCase = RemBertConfig.from_json_file(__UpperCamelCase ) print("""Building PyTorch model from configuration: {}""".format(str(__UpperCamelCase ) ) ) UpperCamelCase = RemBertModel(__UpperCamelCase ) # Load weights from tf checkpoint load_tf_weights_in_rembert(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # Save pytorch-model print("""Save PyTorch model to {}""".format(__UpperCamelCase ) ) torch.save(model.state_dict() , __UpperCamelCase ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--rembert_config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained RemBERT model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) SCREAMING_SNAKE_CASE__ = parser.parse_args() convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
321
'''simple docstring''' def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> bool: return not any( neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(__UpperCamelCase ) ) def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> bool: # Base Case if index == len(__UpperCamelCase ): return True # Recursive Step for i in range(__UpperCamelCase ): if valid_coloring(graph[index] , __UpperCamelCase , __UpperCamelCase ): # Color current vertex UpperCamelCase = i # Validate coloring if util_color(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , index + 1 ): return True # Backtrack UpperCamelCase = -1 return False def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> list[int]: UpperCamelCase = [-1] * len(__UpperCamelCase ) if util_color(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , 0 ): return colored_vertices return []
321
1
from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __snake_case : Any =logging.get_logger(__name__) __snake_case : List[str] ={ 'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json', } class lowerCamelCase__ ( __SCREAMING_SNAKE_CASE): '''simple docstring''' snake_case_ ="efficientnet" def __init__(self ,__lowerCamelCase = 3 ,__lowerCamelCase = 6_00 ,__lowerCamelCase = 2.0 ,__lowerCamelCase = 3.1 ,__lowerCamelCase = 8 ,__lowerCamelCase = [3, 3, 5, 3, 5, 5, 3] ,__lowerCamelCase = [32, 16, 24, 40, 80, 1_12, 1_92] ,__lowerCamelCase = [16, 24, 40, 80, 1_12, 1_92, 3_20] ,__lowerCamelCase = [] ,__lowerCamelCase = [1, 2, 2, 2, 1, 2, 1] ,__lowerCamelCase = [1, 2, 2, 3, 3, 4, 1] ,__lowerCamelCase = [1, 6, 6, 6, 6, 6, 6] ,__lowerCamelCase = 0.25 ,__lowerCamelCase = "swish" ,__lowerCamelCase = 25_60 ,__lowerCamelCase = "mean" ,__lowerCamelCase = 0.02 ,__lowerCamelCase = 0.001 ,__lowerCamelCase = 0.99 ,__lowerCamelCase = 0.5 ,__lowerCamelCase = 0.2 ,**__lowerCamelCase ,) -> int: """simple docstring""" super().__init__(**_snake_case ) lowerCAmelCase__ : Any = num_channels lowerCAmelCase__ : Any = image_size lowerCAmelCase__ : Any = width_coefficient lowerCAmelCase__ : Any = depth_coefficient lowerCAmelCase__ : Union[str, Any] = depth_divisor lowerCAmelCase__ : Tuple = kernel_sizes lowerCAmelCase__ : Tuple = in_channels lowerCAmelCase__ : Any = out_channels lowerCAmelCase__ : int = depthwise_padding lowerCAmelCase__ : List[Any] = strides lowerCAmelCase__ : Any = num_block_repeats lowerCAmelCase__ : Optional[Any] = expand_ratios lowerCAmelCase__ : str = squeeze_expansion_ratio lowerCAmelCase__ : List[Any] = hidden_act lowerCAmelCase__ : Optional[int] = hidden_dim lowerCAmelCase__ : List[str] = pooling_type lowerCAmelCase__ : int = initializer_range lowerCAmelCase__ : List[str] = 
batch_norm_eps lowerCAmelCase__ : Optional[int] = batch_norm_momentum lowerCAmelCase__ : str = dropout_rate lowerCAmelCase__ : List[Any] = drop_connect_rate lowerCAmelCase__ : Optional[int] = sum(_snake_case ) * 4 class lowerCamelCase__ ( __SCREAMING_SNAKE_CASE): '''simple docstring''' snake_case_ =version.parse("""1.11""") @property def lowerCAmelCase__ (self ) -> Tuple: """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def lowerCAmelCase__ (self ) -> Dict: """simple docstring""" return 1e-5
351
def lowerCAmelCase__ ( lowerCamelCase_ : int = 1000000): '''simple docstring''' lowerCAmelCase__ : int = set(range(3 ,lowerCamelCase_ ,2)) primes.add(2) for p in range(3 ,lowerCamelCase_ ,2): if p not in primes: continue primes.difference_update(set(range(p * p ,lowerCamelCase_ ,lowerCamelCase_))) lowerCAmelCase__ : int = [float(lowerCamelCase_) for n in range(limit + 1)] for p in primes: for n in range(lowerCamelCase_ ,limit + 1 ,lowerCamelCase_): phi[n] *= 1 - 1 / p return int(sum(phi[2:])) if __name__ == "__main__": print(f"""{solution() = }""")
94
0
from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) _A = logging.get_logger(__name__) # pylint: disable=invalid-name _A = ''' Examples: ```py >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline >>> import torch >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior") >>> pipe_prior.to("cuda") >>> prompt = "red cat, 4k photo" >>> out = pipe_prior(prompt) >>> image_emb = out.image_embeds >>> zero_image_emb = out.negative_image_embeds >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder") >>> pipe.to("cuda") >>> image = pipe( ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... height=768, ... width=768, ... num_inference_steps=50, ... 
).images >>> image[0].save("cat.png") ``` ''' def lowerCamelCase__ ( a__ : Optional[Any] , a__ : List[Any] , a__ : int=8 ) -> str: UpperCamelCase_ = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 UpperCamelCase_ = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class lowercase_ ( __SCREAMING_SNAKE_CASE ): def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ): """simple docstring""" super().__init__() self.register_modules( unet=__UpperCamelCase , scheduler=__UpperCamelCase , movq=__UpperCamelCase , ) UpperCamelCase_ = 2 ** (len(self.movq.config.block_out_channels ) - 1) def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" if latents is None: UpperCamelCase_ = randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=__UpperCamelCase , dtype=__UpperCamelCase ) else: if latents.shape != shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) UpperCamelCase_ = latents.to(__UpperCamelCase ) UpperCamelCase_ = latents * scheduler.init_noise_sigma return latents def lowerCamelCase_ ( self , __UpperCamelCase=0 ): """simple docstring""" if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) UpperCamelCase_ = torch.device(f'''cuda:{gpu_id}''' ) UpperCamelCase_ = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(__UpperCamelCase , __UpperCamelCase ) def lowerCamelCase_ ( self , __UpperCamelCase=0 ): """simple docstring""" if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ): from accelerate import cpu_offload_with_hook else: raise ImportError("""`enable_model_cpu_offload` requires `accelerate 
v0.17.0` or higher.""" ) UpperCamelCase_ = torch.device(f'''cuda:{gpu_id}''' ) if self.device.type != "cpu": self.to("""cpu""" , silence_dtype_warnings=__UpperCamelCase ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) UpperCamelCase_ = None for cpu_offloaded_model in [self.unet, self.movq]: UpperCamelCase_ , UpperCamelCase_ = cpu_offload_with_hook(__UpperCamelCase , __UpperCamelCase , prev_module_hook=__UpperCamelCase ) # We'll offload the last model manually. UpperCamelCase_ = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def lowerCamelCase_ ( self ): """simple docstring""" if not hasattr(self.unet , """_hf_hook""" ): return self.device for module in self.unet.modules(): if ( hasattr(__UpperCamelCase , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(__UpperCamelCase ) def __call__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 5_1_2 , __UpperCamelCase = 5_1_2 , __UpperCamelCase = 1_0_0 , __UpperCamelCase = 4.0 , __UpperCamelCase = 1 , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = "pil" , __UpperCamelCase = True , ): """simple docstring""" UpperCamelCase_ = self._execution_device UpperCamelCase_ = guidance_scale > 1.0 if isinstance(__UpperCamelCase , __UpperCamelCase ): UpperCamelCase_ = torch.cat(__UpperCamelCase , dim=0 ) UpperCamelCase_ = image_embeds.shape[0] * num_images_per_prompt if isinstance(__UpperCamelCase , __UpperCamelCase ): UpperCamelCase_ = torch.cat(__UpperCamelCase , dim=0 ) if do_classifier_free_guidance: UpperCamelCase_ = image_embeds.repeat_interleave(__UpperCamelCase , dim=0 ) UpperCamelCase_ = negative_image_embeds.repeat_interleave(__UpperCamelCase , dim=0 ) UpperCamelCase_ = 
torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCamelCase ) self.scheduler.set_timesteps(__UpperCamelCase , device=__UpperCamelCase ) UpperCamelCase_ = self.scheduler.timesteps UpperCamelCase_ = self.unet.config.in_channels UpperCamelCase_ , UpperCamelCase_ = downscale_height_and_width(__UpperCamelCase , __UpperCamelCase , self.movq_scale_factor ) # create initial latent UpperCamelCase_ = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , self.scheduler , ) for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ): # expand the latents if we are doing classifier free guidance UpperCamelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCamelCase_ = {"""image_embeds""": image_embeds} UpperCamelCase_ = self.unet( sample=__UpperCamelCase , timestep=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , added_cond_kwargs=__UpperCamelCase , return_dict=__UpperCamelCase , )[0] if do_classifier_free_guidance: UpperCamelCase_ , UpperCamelCase_ = noise_pred.split(latents.shape[1] , dim=1 ) UpperCamelCase_ , UpperCamelCase_ = noise_pred.chunk(2 ) UpperCamelCase_ , UpperCamelCase_ = variance_pred.chunk(2 ) UpperCamelCase_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) UpperCamelCase_ = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , """variance_type""" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): UpperCamelCase_ , UpperCamelCase_ = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 UpperCamelCase_ = self.scheduler.step( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase , )[0] # post-processing UpperCamelCase_ = self.movq.decode(__UpperCamelCase , force_not_quantize=__UpperCamelCase )["""sample"""] if 
output_type not in ["pt", "np", "pil"]: raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' ) if output_type in ["np", "pil"]: UpperCamelCase_ = image * 0.5 + 0.5 UpperCamelCase_ = image.clamp(0 , 1 ) UpperCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": UpperCamelCase_ = self.numpy_to_pil(__UpperCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=__UpperCamelCase )
122
"""Draw the Mandelbrot set as a PIL image.

``get_distance`` iterates z -> z**2 + c for the complex number c = x + y*i and
returns how quickly the orbit escapes, normalised to [0, 1]; the two RGB
helpers turn that distance into a pixel colour and ``get_image`` assembles the
full picture.

NOTE(review): the incoming source was machine-obfuscated — all four helpers
had been renamed to the same placeholder identifier while their call sites
still used the canonical names, so the canonical names are restored here.
"""
import colorsys


def get_distance(x: float, y: float, max_step: int) -> float:
    """Return the normalised escape step of c = x + y*i.

    1.0 means the orbit stayed bounded for all ``max_step`` iterations (the
    point is taken to belong to the Mandelbrot set); smaller values measure
    how early the orbit diverged.
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex numbers with an absolute value
        # greater than 4 (checked here via the squared magnitude)
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """Black for points inside the set (distance == 1), white otherwise."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """Black inside the set; otherwise a hue proportional to the distance."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> "Image.Image":
    """Render the Mandelbrot set and return it as a PIL image.

    PIL is imported lazily so the pure-math helpers above remain usable even
    when Pillow is not installed.
    """
    from PIL import Image  # deferred third-party import

    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # keep the figure's aspect ratio tied to the image's aspect ratio
    # (hoisted out of the pixel loop — it is loop-invariant)
    figure_height = figure_width / image_width * image_height

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
122
1
"""Slow/integration tests for the Flax Stable Diffusion pipeline.

NOTE(review): this file was machine-obfuscated.  All three test methods share
the name `_UpperCAmelCase` (later defs shadow earlier ones), tuple-unpacking
targets carry annotations (`a , b : List[str] = ...` is a SyntaxError), local
names were collapsed to placeholders while their use sites (`sd_pipe`,
`prompt`, `images`, ...) kept the originals, and `jnp.bfloataa` looks like a
garbled `jnp.bfloat16` — confirm against upstream.  Code tokens are preserved
exactly as found; only comments and docstrings were added.
"""
import gc
import unittest

from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax


# jax/flax are optional heavyweight dependencies; only import when available.
if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class a(unittest.TestCase):
    """End-to-end generation checks for stabilityai/stable-diffusion-2 on Flax."""

    def _UpperCAmelCase(self):
        """Collect garbage after each test to release large model buffers."""
        super().tearDown()
        gc.collect()

    def _UpperCAmelCase(self):
        """Generate with the default scheduler and compare a pixel slice."""
        _UpperCAmelCase, _UpperCAmelCase: List[str] = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloataa,
        )

        _UpperCAmelCase: int = "A painting of a squirrel eating a burger"
        _UpperCAmelCase: Optional[int] = jax.device_count()
        # one prompt per device
        _UpperCAmelCase: Tuple = num_samples * [prompt]
        _UpperCAmelCase: Union[str, Any] = sd_pipe.prepare_inputs(__UpperCamelCase)

        # replicate params and shard inputs across devices for pmap
        _UpperCAmelCase: List[Any] = replicate(__UpperCamelCase)
        _UpperCAmelCase: Optional[Any] = shard(__UpperCamelCase)

        # fixed seed, split per device, for a deterministic reference run
        _UpperCAmelCase: Dict = jax.random.PRNGKey(0)
        _UpperCAmelCase: Tuple = jax.random.split(__UpperCamelCase, jax.device_count())

        _UpperCAmelCase: int = sd_pipe(__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, num_inference_steps=25, jit=__UpperCamelCase)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        # flatten the device axis, then inspect a small corner slice
        _UpperCAmelCase: Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        _UpperCAmelCase: Optional[int] = images[0, 253:256, 253:256, -1]
        _UpperCAmelCase: Dict = jnp.asarray(jax.device_get(image_slice.flatten()))
        # reference values recorded from a known-good run
        _UpperCAmelCase: str = jnp.array([0.42_38, 0.44_14, 0.43_95, 0.44_53, 0.46_29, 0.45_90, 0.45_31, 0.4_55_08, 0.45_12])
        print(f'output_slice: {output_slice}')

        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def _UpperCAmelCase(self):
        """Generate with the DPM-Solver multistep scheduler and compare a slice."""
        _UpperCAmelCase: Any = "stabilityai/stable-diffusion-2"
        _UpperCAmelCase, _UpperCAmelCase: Optional[int] = FlaxDPMSolverMultistepScheduler.from_pretrained(__UpperCamelCase, subfolder="scheduler")
        _UpperCAmelCase, _UpperCAmelCase: Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
            __UpperCamelCase,
            scheduler=__UpperCamelCase,
            revision="bf16",
            dtype=jnp.bfloataa,
        )
        # merge the scheduler state into the pipeline params
        _UpperCAmelCase: str = scheduler_params

        _UpperCAmelCase: Optional[int] = "A painting of a squirrel eating a burger"
        _UpperCAmelCase: Any = jax.device_count()
        _UpperCAmelCase: Union[str, Any] = num_samples * [prompt]
        _UpperCAmelCase: Optional[Any] = sd_pipe.prepare_inputs(__UpperCamelCase)

        _UpperCAmelCase: str = replicate(__UpperCamelCase)
        _UpperCAmelCase: Any = shard(__UpperCamelCase)

        _UpperCAmelCase: Optional[Any] = jax.random.PRNGKey(0)
        _UpperCAmelCase: List[Any] = jax.random.split(__UpperCamelCase, jax.device_count())

        _UpperCAmelCase: List[Any] = sd_pipe(__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, num_inference_steps=25, jit=__UpperCamelCase)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        _UpperCAmelCase: Dict = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        _UpperCAmelCase: int = images[0, 253:256, 253:256, -1]
        _UpperCAmelCase: Tuple = jnp.asarray(jax.device_get(image_slice.flatten()))
        # reference values recorded from a known-good run with DPM-Solver
        _UpperCAmelCase: Optional[Any] = jnp.array([0.43_36, 0.4_29_69, 0.44_53, 0.41_99, 0.42_97, 0.45_31, 0.44_34, 0.44_34, 0.42_97])
        print(f'output_slice: {output_slice}')

        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
360
"""A pure-Python linear-algebra toolkit: ``Vector``, ``Matrix`` and helpers.

NOTE(review): recovered from machine-obfuscated source in which both classes
were renamed ``a``, every helper function ``__SCREAMING_SNAKE_CASE`` and every
method ``_UpperCAmelCase`` (mass name collisions), while the call sites
(``other.component``, ``zero_vector``, ``self.minor``, ...) kept the canonical
names.  The canonical names are restored here so the module is importable and
internally consistent.
"""
from __future__ import annotations

import math
import random
from collections.abc import Collection
from typing import overload


class Vector:
    """A real-valued vector backed by a plain Python list of components."""

    def __init__(self, components: Collection[float] | None = None) -> None:
        """Create a vector from any collection; defaults to the empty vector."""
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        """Return the number of components (the dimension)."""
        return len(self.__components)

    def __str__(self) -> str:
        """Render as ``(c1,c2,...)``."""
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        """Component-wise sum; both vectors must have the same dimension."""
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        """Component-wise difference; both vectors must have the same dimension."""
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        """Scalar multiplication for numbers, dot product for vectors."""
        if isinstance(other, (float, int)):
            scaled = [c * other for c in self.__components]
            return Vector(scaled)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        """Return a copy of this vector."""
        return Vector(self.__components)

    def component(self, i: int) -> float:
        """Return the i-th component (negative indices address from the end)."""
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        """Set the component at index ``pos`` to ``value``."""
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        """Return the Euclidean (L2) norm; raises on the empty vector."""
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        """Return the angle to ``other`` in radians (degrees if ``deg``)."""
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


def zero_vector(dimension: int) -> Vector:
    """Return the zero vector of the given dimension."""
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    """Return the unit basis vector with a single 1 at index ``pos``."""
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    """Compute the classic BLAS update ``scalar * x + y``."""
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    """Return an n-dimensional vector of random integers in [a, b].

    NOTE(review): the obfuscated source seeded ``random`` with one of the
    (indistinguishably renamed) parameters; ``n`` is used here so a given
    call is reproducible — confirm against the original if it matters.
    """
    random.seed(n)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)


class Matrix:
    """A ``w`` x ``h`` real matrix backed by a row-major list of row lists."""

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        """Store ``matrix`` with width ``w`` (columns) and height ``h`` (rows)."""
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        """Render each row as ``|a,b,...|`` on its own line."""
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        """Component-wise sum; dimensions must match."""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        """Component-wise difference; dimensions must match."""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        """Matrix-vector product for vectors, scaling for numbers."""
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        # unsupported operand: mirror the original's silent None fall-through
        return None

    def height(self) -> int:
        """Return the number of rows."""
        return self.__height

    def width(self) -> int:
        """Return the number of columns."""
        return self.__width

    def component(self, x: int, y: int) -> float:
        """Return the entry at row ``x``, column ``y``."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            # message kept verbatim from the original library
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        """Set the entry at row ``x``, column ``y`` to ``value``."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        """Return the minor: determinant after deleting row ``x`` and column ``y``."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        submatrix = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(submatrix)):
            submatrix[i] = submatrix[i][:y] + submatrix[i][y + 1 :]
        return Matrix(submatrix, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        """Return the signed minor ``(-1)**(x+y) * minor(x, y)``."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        """Return the determinant via cofactor expansion along the first row."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    """Return the ``n`` x ``n`` zero matrix."""
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    """Return a ``width`` x ``height`` matrix of random integers in [a, b].

    NOTE(review): seeded with ``width`` for reproducibility; the obfuscated
    source seeded with an indistinguishably renamed parameter — confirm.
    """
    random.seed(width)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
189
0
"""Tests for the transformers agent type wrappers (AgentAudio/AgentImage/AgentText).

NOTE(review): this file was machine-obfuscated.  The helper originally named
`get_new_path` was renamed `UpperCAmelCase__` (its call site below still says
`get_new_path`), all test methods inside a class share the name
`UpperCAmelCase_`, and local names were collapsed to placeholders while use
sites (`agent_type`, `suffix`, `path`, `image`, ...) kept the originals.
`uuid.uuida` / `torch.floataa` look like garbled `uuid.uuid4` /
`torch.float32` (digit-mangling), and typing names used in signature
annotations (`Optional`, `Union`, ...) are never imported, so the module
cannot run as-is.  Code tokens are preserved exactly as found; only comments
and docstrings were added.
"""
import os
import tempfile
import unittest
import uuid
from pathlib import Path

from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available


# optional heavy dependencies, imported only when installed
if is_torch_available():
    import torch

if is_soundfile_availble():
    import soundfile as sf

if is_vision_available():
    from PIL import Image


def UpperCAmelCase__(lowerCAmelCase_=""):
    """Return a unique path inside a fresh temp directory, ending in the suffix."""
    __SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
    return os.path.join(lowerCAmelCase_, str(uuid.uuida()) + suffix)


@require_soundfile
@require_torch
class UpperCamelCase_(unittest.TestCase):
    """AgentAudio round-trip tests (tensor <-> wav file on disk)."""

    def UpperCAmelCase_(self: Optional[int]) -> Union[str, Any]:
        """From a tensor: to_string() writes a file holding the same samples."""
        __SCREAMING_SNAKE_CASE = torch.rand(1_2, dtype=torch.floataa) - 0.5
        __SCREAMING_SNAKE_CASE = AgentAudio(UpperCAmelCase__)
        __SCREAMING_SNAKE_CASE = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(UpperCAmelCase__, agent_type.to_raw(), atol=1E-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(UpperCAmelCase__))

        # Ensure that the file contains the same value as the original tensor
        __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE = sf.read(UpperCAmelCase__)
        self.assertTrue(torch.allclose(UpperCAmelCase__, torch.tensor(UpperCAmelCase__), atol=1E-4))

    def UpperCAmelCase_(self: Tuple) -> str:
        """From a wav path: to_raw() matches the samples, to_string() echoes the path."""
        __SCREAMING_SNAKE_CASE = torch.rand(1_2, dtype=torch.floataa) - 0.5
        __SCREAMING_SNAKE_CASE = get_new_path(suffix=".wav")
        sf.write(UpperCAmelCase__, UpperCAmelCase__, 1_6_0_0_0)
        __SCREAMING_SNAKE_CASE = AgentAudio(UpperCAmelCase__)

        self.assertTrue(torch.allclose(UpperCAmelCase__, agent_type.to_raw(), atol=1E-4))
        self.assertEqual(agent_type.to_string(), UpperCAmelCase__)


@require_vision
@require_torch
class UpperCamelCase_(unittest.TestCase):
    """AgentImage round-trip tests (tensor / path / PIL image inputs)."""

    def UpperCAmelCase_(self: Dict) -> Union[str, Any]:
        """From a tensor: raw form is a PIL image and the serialized file persists."""
        __SCREAMING_SNAKE_CASE = torch.randint(0, 2_5_6, (6_4, 6_4, 3))
        __SCREAMING_SNAKE_CASE = AgentImage(UpperCAmelCase__)
        __SCREAMING_SNAKE_CASE = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(UpperCAmelCase__, agent_type._tensor, atol=1E-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(UpperCAmelCase__))

    def UpperCAmelCase_(self: Union[str, Any]) -> Any:
        """From a path: to_string() points back at the very same file."""
        __SCREAMING_SNAKE_CASE = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        __SCREAMING_SNAKE_CASE = Image.open(UpperCAmelCase__)
        __SCREAMING_SNAKE_CASE = AgentImage(UpperCAmelCase__)

        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(UpperCAmelCase__))

    def UpperCAmelCase_(self: Optional[int]) -> List[str]:
        """From a PIL image: serialization writes a new file, not the source file."""
        __SCREAMING_SNAKE_CASE = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        __SCREAMING_SNAKE_CASE = Image.open(UpperCAmelCase__)
        __SCREAMING_SNAKE_CASE = AgentImage(UpperCAmelCase__)

        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(UpperCAmelCase__))


class UpperCamelCase_(unittest.TestCase):
    """AgentText behaves like the plain string it wraps."""

    def UpperCAmelCase_(self: int) -> Tuple:
        """to_string() and to_raw() both return the wrapped string unchanged."""
        __SCREAMING_SNAKE_CASE = "Hey!"
        __SCREAMING_SNAKE_CASE = AgentText(UpperCAmelCase__)

        self.assertEqual(UpperCAmelCase__, agent_type.to_string())
        self.assertEqual(UpperCAmelCase__, agent_type.to_raw())
        self.assertEqual(UpperCAmelCase__, UpperCAmelCase__)
54
"""Word error rate (WER) metric for the `datasets` library, backed by `jiwer`.

NOTE(review): this file was machine-obfuscated.  The three module constants
below were all renamed to the same placeholder `a__` (later assignments
shadow earlier ones), yet the decorator on the metric class still references
the canonical `_DESCRIPTION` / `_KWARGS_DESCRIPTION` names, the two methods
share the name `UpperCAmelCase_`, and the second method's parameters were all
renamed `UpperCAmelCase__` (duplicate parameter names are a SyntaxError).
The module-level annotation `Optional[int]` also references an unimported
name.  Code tokens are preserved exactly as found; only comments and
docstrings were added.  String-literal line breaks were reconstructed from
the newline-stripped source — confirm against upstream.
"""
from jiwer import compute_measures

import datasets


# BibTeX citation for the measure (originally `_CITATION`).
a__ : Optional[int] = '''\
@inproceedings{inproceedings,
    author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
    year = {2004},
    month = {01},
    pages = {},
    title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''

# Long-form description of the metric (originally `_DESCRIPTION`).
a__ : List[str] = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.

The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.

This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.

Word error rate can then be computed as:

WER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).

This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''

# Usage / argument documentation (originally `_KWARGS_DESCRIPTION`).
a__ : Dict = '''
Compute WER score of transcribed segments against references.

Args:
    references: List of references for each speech input.
    predictions: List of transcriptions to score.
    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.

Returns:
    (float): the word error rate

Examples:

    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> wer = datasets.load_metric("wer")
    >>> wer_score = wer.compute(predictions=predictions, references=references)
    >>> print(wer_score)
    0.5
'''


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCamelCase_(datasets.Metric):
    """`datasets.Metric` implementation of word error rate via jiwer."""

    def UpperCAmelCase_(self: List[Any]) -> str:
        """Describe the metric: features, citation and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def UpperCAmelCase_(self: Tuple, UpperCAmelCase__: Tuple = None, UpperCAmelCase__: List[str] = None, UpperCAmelCase__: Any = False) -> Optional[int]:
        """Compute WER, either over concatenated texts or accumulated pairwise."""
        if concatenate_texts:
            # single jiwer call over the whole corpus
            return compute_measures(UpperCAmelCase__, UpperCAmelCase__)["wer"]
        else:
            # accumulate error and reference-length counts pair by pair
            __SCREAMING_SNAKE_CASE = 0
            __SCREAMING_SNAKE_CASE = 0
            for prediction, reference in zip(UpperCAmelCase__, UpperCAmelCase__):
                __SCREAMING_SNAKE_CASE = compute_measures(UpperCAmelCase__, UpperCAmelCase__)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
54
1
"""Opt-in `rich` traceback integration for the host package.

Importing this module installs rich's pretty traceback handler process-wide,
or fails fast with an actionable error when rich is not installed.
"""
from .imports import is_rich_available


if is_rich_available():
    from rich.traceback import install

    # Register rich as the global traceback renderer; show_locals=False keeps
    # the output compact.
    install(show_locals=False)

else:
    raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
368
"""Tests for the TensorFlow DeiT models (TFDeiT*).

NOTE(review): this file was machine-obfuscated.  Both helper/test classes were
renamed `a_`, most methods `a__`, and every assignment target
`lowerCamelCase__` — including attribute assignments whose leading `self.`
was stripped — while use sites kept the canonical names (`self.parent`,
`config`, `model`, `TFDeiTModelTester`, the `snake_case_` base classes, ...).
Several `def`s also repeat the same parameter name (`lowerCamelCase_`), which
is a SyntaxError, so the module cannot run as-is.  Code tokens are preserved
exactly as found; only comments and docstrings were added.
"""
from __future__ import annotations

import inspect
import unittest

import numpy as np

from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


# heavy optional deps, imported only when available
if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFDeiTForImageClassification,
        TFDeiTForImageClassificationWithTeacher,
        TFDeiTForMaskedImageModeling,
        TFDeiTModel,
    )
    from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import DeiTImageProcessor


class a_:
    """Builds tiny DeiT configs/inputs and runs shape checks for each model head."""

    def __init__(
        self,
        lowerCamelCase_,
        lowerCamelCase_=1_3,
        lowerCamelCase_=3_0,
        lowerCamelCase_=2,
        lowerCamelCase_=3,
        lowerCamelCase_=True,
        lowerCamelCase_=True,
        lowerCamelCase_=3_2,
        lowerCamelCase_=2,
        lowerCamelCase_=4,
        lowerCamelCase_=3_7,
        lowerCamelCase_="gelu",
        lowerCamelCase_=0.1,
        lowerCamelCase_=0.1,
        lowerCamelCase_=1_0,
        lowerCamelCase_=0.02,
        lowerCamelCase_=3,
        lowerCamelCase_=None,
        lowerCamelCase_=2,
    ):
        """Record the tiny-model hyper-parameters used by all checks below."""
        # NOTE(review): each assignment originally targeted an attribute
        # (`self.parent = parent`, ...); obfuscation stripped the targets.
        lowerCamelCase__ : Union[str, Any] = parent
        lowerCamelCase__ : int = batch_size
        lowerCamelCase__ : Dict = image_size
        lowerCamelCase__ : List[str] = patch_size
        lowerCamelCase__ : Union[str, Any] = num_channels
        lowerCamelCase__ : str = is_training
        lowerCamelCase__ : Any = use_labels
        lowerCamelCase__ : Tuple = hidden_size
        lowerCamelCase__ : str = num_hidden_layers
        lowerCamelCase__ : Dict = num_attention_heads
        lowerCamelCase__ : Union[str, Any] = intermediate_size
        lowerCamelCase__ : Any = hidden_act
        lowerCamelCase__ : Dict = hidden_dropout_prob
        lowerCamelCase__ : Optional[Any] = attention_probs_dropout_prob
        lowerCamelCase__ : List[Any] = type_sequence_label_size
        lowerCamelCase__ : Optional[int] = initializer_range
        lowerCamelCase__ : Tuple = scope
        lowerCamelCase__ : List[str] = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        lowerCamelCase__ : str = (image_size // patch_size) ** 2
        lowerCamelCase__ : Optional[int] = num_patches + 2

    def a__(self):
        """Build (config, pixel_values, labels) for one forward pass."""
        lowerCamelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        lowerCamelCase__ : Tuple = None
        if self.use_labels:
            lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size)

        lowerCamelCase__ : List[str] = self.get_config()

        return config, pixel_values, labels

    def a__(self):
        """Return a DeiTConfig assembled from the recorded hyper-parameters."""
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=lowerCamelCase_,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def a__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_):
        """Check the base model's last_hidden_state shape."""
        lowerCamelCase__ : Optional[Any] = TFDeiTModel(config=lowerCamelCase_)
        lowerCamelCase__ : Dict = model(lowerCamelCase_)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def a__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_):
        """Check masked-image-modeling reconstruction shapes (RGB and greyscale)."""
        lowerCamelCase__ : List[str] = TFDeiTForMaskedImageModeling(config=lowerCamelCase_)
        lowerCamelCase__ : Any = model(lowerCamelCase_)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        lowerCamelCase__ : Tuple = 1
        lowerCamelCase__ : Optional[Any] = TFDeiTForMaskedImageModeling(lowerCamelCase_)

        lowerCamelCase__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def a__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_):
        """Check image-classification logit shapes (RGB and greyscale)."""
        lowerCamelCase__ : int = self.type_sequence_label_size
        lowerCamelCase__ : Union[str, Any] = TFDeiTForImageClassification(lowerCamelCase_)
        lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, labels=lowerCamelCase_)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        lowerCamelCase__ : List[str] = 1
        lowerCamelCase__ : Any = TFDeiTForImageClassification(lowerCamelCase_)

        lowerCamelCase__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        lowerCamelCase__ : Optional[int] = model(lowerCamelCase_, labels=lowerCamelCase_)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def a__(self):
        """Return (config, inputs_dict) in the shape the common tester expects."""
        lowerCamelCase__ : Any = self.prepare_config_and_inputs()
        lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ : Tuple = config_and_inputs
        lowerCamelCase__ : str = {'pixel_values': pixel_values}
        return config, inputs_dict


@require_tf
class a_(snake_case_, snake_case_, unittest.TestCase):
    """Common model/pipeline test-suite wiring for the TF DeiT variants."""

    lowerCamelCase__ : Any = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    lowerCamelCase__ : Tuple = (
        {
            'feature-extraction': TFDeiTModel,
            'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    lowerCamelCase__ : Any = False
    lowerCamelCase__ : Optional[Any] = False
    lowerCamelCase__ : Dict = False
    lowerCamelCase__ : int = False

    def a__(self):
        """Create the model tester and config tester used by the common suite."""
        lowerCamelCase__ : List[Any] = TFDeiTModelTester(self)
        lowerCamelCase__ : Union[str, Any] = ConfigTester(self, config_class=lowerCamelCase_, has_text_modality=lowerCamelCase_, hidden_size=3_7)

    def a__(self):
        """Run the shared config sanity tests."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason='DeiT does not use inputs_embeds')
    def a__(self):
        """Skipped: DeiT consumes pixel values, not input embeddings."""
        pass

    def a__(self):
        """Each model exposes input embeddings; output embeddings are Dense or None."""
        lowerCamelCase__, lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowerCamelCase__ : Optional[int] = model_class(lowerCamelCase_)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            lowerCamelCase__ : List[str] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(lowerCamelCase_, tf.keras.layers.Dense))

    def a__(self):
        """`call`'s first argument is always `pixel_values`."""
        lowerCamelCase__, lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowerCamelCase__ : Dict = model_class(lowerCamelCase_)
            lowerCamelCase__ : Any = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase__ : str = [*signature.parameters.keys()]

            lowerCamelCase__ : Union[str, Any] = ['pixel_values']
            self.assertListEqual(arg_names[:1], lowerCamelCase_)

    def a__(self):
        """Base-model forward-shape check."""
        lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase_)

    def a__(self):
        """Masked-image-modeling head forward-shape check."""
        lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_)

    def a__(self):
        """Image-classification head forward-shape check."""
        lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_)

    def a__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=False):
        """Drop `labels` for models whose `call` signature does not accept them."""
        lowerCamelCase__ : Optional[Any] = super()._prepare_for_class(lowerCamelCase_, lowerCamelCase_, return_labels=lowerCamelCase_)

        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]

        return inputs_dict

    @slow
    def a__(self):
        """Smoke-test loading a published checkpoint."""
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ : int = TFDeiTModel.from_pretrained(lowerCamelCase_)
            self.assertIsNotNone(lowerCamelCase_)


def lowerCamelCase_():
    """Load the standard COCO fixture image used by the integration test."""
    lowerCamelCase__ : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image


@require_tf
@require_vision
class a_(unittest.TestCase):
    """Integration test against the published distilled DeiT checkpoint."""

    @cached_property
    def a__(self):
        """Image processor matching the checkpoint (None without vision deps)."""
        return (
            DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224')
            if is_vision_available()
            else None
        )

    @slow
    def a__(self):
        """Full forward pass: verify logit shape and the first three logits."""
        lowerCamelCase__ : str = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224')

        lowerCamelCase__ : List[Any] = self.default_image_processor
        lowerCamelCase__ : Union[str, Any] = prepare_img()
        lowerCamelCase__ : Optional[int] = image_processor(images=lowerCamelCase_, return_tensors='tf')

        # forward pass
        lowerCamelCase__ : Tuple = model(**lowerCamelCase_)

        # verify the logits
        lowerCamelCase__ : str = tf.TensorShape((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape, lowerCamelCase_)

        # reference values recorded from a known-good run
        lowerCamelCase__ : Any = tf.constant([-1.0_266, 0.1_912, -1.2_861])

        self.assertTrue(np.allclose(outputs.logits[0, :3], lowerCamelCase_, atol=1e-4))
316
0